diff --git a/152334H/miqu-1-70b-sf/raw_2024-05-23T11-20-45.843993/results.json b/152334H/miqu-1-70b-sf/raw_2024-05-23T11-20-45.843993/results.json index 170dca1520eba404a02fb28bdf8d015b4e880d48..917d9bf0a93ee81364175dab8228e102302ae63d 100644 --- a/152334H/miqu-1-70b-sf/raw_2024-05-23T11-20-45.843993/results.json +++ b/152334H/miqu-1-70b-sf/raw_2024-05-23T11-20-45.843993/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9309631191550011, - "acc,all": 0.9309640522875817, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.6972270341129023, - "mse,all": 1.0077399689542483, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.6481223922114048, - "acc,exam_id__UNICAMP_2023": 0.6511627906976745, - "acc,exam_id__UNICAMP_2019": 0.66, - "acc,exam_id__USP_2022": 0.5918367346938775, - "acc,exam_id__UNICAMP_2020": 0.6909090909090909, - "acc,exam_id__UNICAMP_2022": 0.6410256410256411, - "acc,exam_id__USP_2018": 0.5555555555555556, - "acc,exam_id__UNICAMP_2021_2": 0.6862745098039216, - "acc,exam_id__USP_2019": 0.575, - "acc,exam_id__USP_2021": 0.5769230769230769, - "acc,exam_id__USP_2024": 0.8048780487804879, - "acc,exam_id__UNICAMP_2021_1": 0.6086956521739131, - "acc,exam_id__USP_2020": 0.6785714285714286, - "acc,exam_id__UNICAMP_2024": 0.6666666666666666, - "acc,exam_id__UNICAMP_2018": 0.6111111111111112, - "acc,exam_id__USP_2023": 0.75, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.7466759972008398, - "acc,exam_id__2016": 0.7272727272727273, - "acc,exam_id__2022": 0.6917293233082706, - "acc,exam_id__2009": 0.7217391304347827, - "acc,exam_id__2012": 0.75, - "acc,exam_id__2014": 0.7706422018348624, - "acc,exam_id__2017": 0.7586206896551724, - "acc,exam_id__2016_2": 0.7235772357723578, - "acc,exam_id__2010": 0.7948717948717948, - "acc,exam_id__2011": 0.811965811965812, - "acc,exam_id__2013": 0.6944444444444444, - "acc,exam_id__2015": 0.7310924369747899, - "acc,exam_id__2023": 0.7851851851851852 - }, - "faquad_nli": { - "f1_macro,all": 0.7641750093536355, - "acc,all": 0.803076923076923, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8382584367896485, - "acc,all": 0.8414285714285714 - }, - "oab_exams": { - "acc,all": 0.5398633257403189, - "acc,exam_id__2012-09": 0.5454545454545454, - "acc,exam_id__2012-06a": 0.5625, - "acc,exam_id__2014-14": 0.5625, - "acc,exam_id__2014-13": 0.4625, - "acc,exam_id__2010-01": 0.36470588235294116, - "acc,exam_id__2017-22": 0.6125, - "acc,exam_id__2011-04": 0.5, - "acc,exam_id__2017-23": 0.4625, - "acc,exam_id__2012-08": 0.5625, - "acc,exam_id__2015-16": 0.525, - "acc,exam_id__2011-03": 0.41414141414141414, - "acc,exam_id__2014-15": 0.6538461538461539, - "acc,exam_id__2016-20a": 0.525, - "acc,exam_id__2016-21": 0.4375, - "acc,exam_id__2017-24": 0.5375, - "acc,exam_id__2013-11": 0.575, - "acc,exam_id__2013-12": 0.65, - "acc,exam_id__2011-05": 0.525, - "acc,exam_id__2010-02": 0.59, - "acc,exam_id__2012-07": 0.5, - "acc,exam_id__2013-10": 0.6, - "acc,exam_id__2016-20": 0.5375, - "acc,exam_id__2015-18": 0.6125, - "acc,exam_id__2016-19": 0.5512820512820513, - "acc,exam_id__2015-17": 0.6923076923076923, - "acc,exam_id__2018-25": 0.5, - "acc,exam_id__2012-06": 0.55, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7336708394698086, - "acc,all": 0.7602820211515864 - }, - "tweetsentbr": { - "f1_macro,all": 0.5368519669911656, - "acc,all": 0.7417910447761195, 
- "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9309631191550011, + "acc,all": 0.9309640522875817, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.6972270341129023, + "mse,all": 1.0077399689542483, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.6481223922114048, + "acc,exam_id__UNICAMP_2023": 0.6511627906976745, + "acc,exam_id__UNICAMP_2019": 0.66, + "acc,exam_id__USP_2022": 0.5918367346938775, + "acc,exam_id__UNICAMP_2020": 0.6909090909090909, + "acc,exam_id__UNICAMP_2022": 0.6410256410256411, + "acc,exam_id__USP_2018": 0.5555555555555556, + "acc,exam_id__UNICAMP_2021_2": 0.6862745098039216, + "acc,exam_id__USP_2019": 0.575, + "acc,exam_id__USP_2021": 0.5769230769230769, + "acc,exam_id__USP_2024": 0.8048780487804879, + "acc,exam_id__UNICAMP_2021_1": 0.6086956521739131, + "acc,exam_id__USP_2020": 0.6785714285714286, + "acc,exam_id__UNICAMP_2024": 0.6666666666666666, + "acc,exam_id__UNICAMP_2018": 0.6111111111111112, + "acc,exam_id__USP_2023": 0.75, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.7466759972008398, + "acc,exam_id__2016": 0.7272727272727273, + "acc,exam_id__2022": 0.6917293233082706, + "acc,exam_id__2009": 0.7217391304347827, + "acc,exam_id__2012": 0.75, + "acc,exam_id__2014": 0.7706422018348624, + "acc,exam_id__2017": 0.7586206896551724, + "acc,exam_id__2016_2": 0.7235772357723578, + "acc,exam_id__2010": 0.7948717948717948, + "acc,exam_id__2011": 0.811965811965812, + "acc,exam_id__2013": 0.6944444444444444, + "acc,exam_id__2015": 0.7310924369747899, + "acc,exam_id__2023": 0.7851851851851852 + }, + "faquad_nli": { + "f1_macro,all": 0.7641750093536355, + "acc,all": 0.803076923076923, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8382584367896485, + "acc,all": 0.8414285714285714 + }, + "oab_exams": { + "acc,all": 0.5398633257403189, + "acc,exam_id__2012-09": 0.5454545454545454, + "acc,exam_id__2012-06a": 0.5625, + "acc,exam_id__2014-14": 0.5625, + "acc,exam_id__2014-13": 0.4625, + "acc,exam_id__2010-01": 0.36470588235294116, + "acc,exam_id__2017-22": 0.6125, + "acc,exam_id__2011-04": 0.5, + "acc,exam_id__2017-23": 0.4625, + 
"acc,exam_id__2012-08": 0.5625, + "acc,exam_id__2015-16": 0.525, + "acc,exam_id__2011-03": 0.41414141414141414, + "acc,exam_id__2014-15": 0.6538461538461539, + "acc,exam_id__2016-20a": 0.525, + "acc,exam_id__2016-21": 0.4375, + "acc,exam_id__2017-24": 0.5375, + "acc,exam_id__2013-11": 0.575, + "acc,exam_id__2013-12": 0.65, + "acc,exam_id__2011-05": 0.525, + "acc,exam_id__2010-02": 0.59, + "acc,exam_id__2012-07": 0.5, + "acc,exam_id__2013-10": 0.6, + "acc,exam_id__2016-20": 0.5375, + "acc,exam_id__2015-18": 0.6125, + "acc,exam_id__2016-19": 0.5512820512820513, + "acc,exam_id__2015-17": 0.6923076923076923, + "acc,exam_id__2018-25": 0.5, + "acc,exam_id__2012-06": 0.55, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7336708394698086, + "acc,all": 0.7602820211515864 + }, + "tweetsentbr": { + "f1_macro,all": 0.7158026226548874, + "acc,all": 0.7417910447761195, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": 
"find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - 
"regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. 
Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": true, - "chat_type": "user_assistant", - "n_gpus": 2, - "accelerate_num_process": null, - "model_sha": "1dca4cce36f01f2104ee2e6b97bac6ff7bb300c1", - "model_dtype": "torch.float16", - "model_memory_footprint": 137953316864, - "model_num_parameters": 68976648192, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 1, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1385.9889705882354, - "min_seq_length": 1363, - "max_seq_length": 1452, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1609.9889705882354, - "min_seq_length": 1587, - "max_seq_length": 1676, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1693.7426981919332, - "min_seq_length": 1327, - "max_seq_length": 2453, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 
1572.9881035689293, - "min_seq_length": 1320, - "max_seq_length": 2612, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1608.1184615384616, - "min_seq_length": 1556, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": true, + "chat_type": "user_assistant", + "n_gpus": 2, + "accelerate_num_process": null, + "model_sha": "1dca4cce36f01f2104ee2e6b97bac6ff7bb300c1", + "model_dtype": "torch.float16", + "model_memory_footprint": 137953316864, + "model_num_parameters": 68976648192, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 1, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1425.9178571428572, - "min_seq_length": 1402, - "max_seq_length": 1672, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1345.4145785876992, - "min_seq_length": 1090, - "max_seq_length": 1827, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1385.9889705882354, + "min_seq_length": 1363, + "max_seq_length": 1452, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1609.9889705882354, + "min_seq_length": 1587, + "max_seq_length": 1676, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1693.7426981919332, + "min_seq_length": 1327, + "max_seq_length": 2453, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1572.9881035689293, + "min_seq_length": 1320, + "max_seq_length": 2612, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1608.1184615384616, + "min_seq_length": 1556, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1425.9178571428572, + "min_seq_length": 1402, + "max_seq_length": 1672, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1345.4145785876992, + "min_seq_length": 1090, + "max_seq_length": 1827, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1917.801410105758, + "min_seq_length": 1883, + "max_seq_length": 1961, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1692.6845771144278, + "min_seq_length": 1671, + "max_seq_length": 1810, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1917.801410105758, - "min_seq_length": 1883, - "max_seq_length": 1961, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=152334H/miqu-1-70b-sf,dtype=float16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1692.6845771144278, - "min_seq_length": 1671, - "max_seq_length": 1810, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=152334H/miqu-1-70b-sf,dtype=float16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - 
"git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/152334H/miqu-1-70b-sf/results_2024-05-23T11-20-45.843993.json b/152334H/miqu-1-70b-sf/results_2024-05-23T11-20-45.843993.json index 05c03c9a40bfd9bddcb71d7c9f175d55ad55238e..fdded2e803dfe8989449c53f49cfff20c0861b73 100644 --- a/152334H/miqu-1-70b-sf/results_2024-05-23T11-20-45.843993.json +++ b/152334H/miqu-1-70b-sf/results_2024-05-23T11-20-45.843993.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.7150897912249694, - "all_grouped_npm": 0.5797294824942764, + "all_grouped_average": 0.7349731974098275, + "all_grouped_npm": 0.6093178845550768, "all_grouped": { "enem_challenge": 0.7466759972008398, "bluex": 0.6481223922114048, @@ -45,7 +45,7 @@ "faquad_nli": 0.7641750093536355, "hatebr_offensive": 0.8382584367896485, "portuguese_hate_speech": 0.7336708394698086, - "tweetsentbr": 0.5368519669911656 + "tweetsentbr": 0.7158026226548874 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.7466759972008398, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7641750093536355, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8382584367896485, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7336708394698086, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5368519669911656 + "harness|tweetsentbr|tweetsentbr|None|25": 0.7158026226548874 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.7466759972008398, @@ -150,9 +150,9 @@ "main_score": 0.7336708394698086 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5368519669911656, + "f1_macro,all": 0.7158026226548874, "acc,all": 0.7417910447761195, - "main_score": 0.5368519669911656 + "main_score": 0.7158026226548874 } }, "config_tasks": { diff --git a/BAAI/Infinity-Instruct-3M-0613-Mistral-7B/raw_2024-06-22T01-31-31.647844/results.json b/BAAI/Infinity-Instruct-3M-0613-Mistral-7B/raw_2024-06-22T01-31-31.647844/results.json index 2b177632d9bc0d7063f616badb10b703b9072da3..8f175383a5cbd045c524d323572cf90dfa7871a0 100644 --- a/BAAI/Infinity-Instruct-3M-0613-Mistral-7B/raw_2024-06-22T01-31-31.647844/results.json +++ b/BAAI/Infinity-Instruct-3M-0613-Mistral-7B/raw_2024-06-22T01-31-31.647844/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9174712657490132, - "acc,all": 0.9174836601307189, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7632047672731808, - "mse,all": 0.5285130718954247, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5326842837273992, - "acc,exam_id__USP_2022": 0.46938775510204084, - "acc,exam_id__UNICAMP_2018": 0.35185185185185186, - "acc,exam_id__USP_2021": 0.5576923076923077, - "acc,exam_id__UNICAMP_2024": 0.4666666666666667, - "acc,exam_id__USP_2018": 0.46296296296296297, - "acc,exam_id__USP_2024": 0.7317073170731707, - "acc,exam_id__USP_2019": 0.5, - "acc,exam_id__UNICAMP_2019": 0.5, - "acc,exam_id__UNICAMP_2021_1": 0.5, - "acc,exam_id__UNICAMP_2021_2": 0.39215686274509803, - "acc,exam_id__UNICAMP_2023": 0.6976744186046512, - "acc,exam_id__UNICAMP_2020": 0.6363636363636364, - "acc,exam_id__USP_2023": 0.6818181818181818, - "acc,exam_id__USP_2020": 0.5535714285714286, - "acc,exam_id__UNICAMP_2022": 0.5641025641025641, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6466060181945417, - "acc,exam_id__2017": 0.6206896551724138, - "acc,exam_id__2012": 0.6206896551724138, - "acc,exam_id__2022": 0.6616541353383458, - "acc,exam_id__2016": 
0.6446280991735537, - "acc,exam_id__2010": 0.6153846153846154, - "acc,exam_id__2011": 0.7094017094017094, - "acc,exam_id__2014": 0.6513761467889908, - "acc,exam_id__2013": 0.6944444444444444, - "acc,exam_id__2016_2": 0.6585365853658537, - "acc,exam_id__2015": 0.6134453781512605, - "acc,exam_id__2009": 0.5826086956521739, - "acc,exam_id__2023": 0.6814814814814815 - }, - "faquad_nli": { - "f1_macro,all": 0.8241841468197617, - "acc,all": 0.8769230769230769, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7990490978163615, - "acc,all": 0.8042857142857143 - }, - "oab_exams": { - "acc,all": 0.44510250569476084, - "acc,exam_id__2013-11": 0.4625, - "acc,exam_id__2016-21": 0.4875, - "acc,exam_id__2012-06": 0.4625, - "acc,exam_id__2013-10": 0.4375, - "acc,exam_id__2012-09": 0.33766233766233766, - "acc,exam_id__2012-08": 0.475, - "acc,exam_id__2015-18": 0.4875, - "acc,exam_id__2017-24": 0.5125, - "acc,exam_id__2017-22": 0.4875, - "acc,exam_id__2016-19": 0.5512820512820513, - "acc,exam_id__2010-02": 0.52, - "acc,exam_id__2012-07": 0.3875, - "acc,exam_id__2017-23": 0.4, - "acc,exam_id__2014-15": 0.41025641025641024, - "acc,exam_id__2011-05": 0.475, - "acc,exam_id__2015-16": 0.4375, - "acc,exam_id__2016-20a": 0.4, - "acc,exam_id__2014-14": 0.4875, - "acc,exam_id__2018-25": 0.425, - "acc,exam_id__2010-01": 0.3411764705882353, - "acc,exam_id__2014-13": 0.4, - "acc,exam_id__2013-12": 0.475, - "acc,exam_id__2016-20": 0.4125, - "acc,exam_id__2012-06a": 0.4875, - "acc,exam_id__2011-03": 0.35353535353535354, - "acc,exam_id__2011-04": 0.3625, - "acc,exam_id__2015-17": 0.5512820512820513, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7141208181486736, - "acc,all": 0.7520564042303173 - }, - "tweetsentbr": { - "f1_macro,all": 0.49998821485827083, - "acc,all": 0.7079601990049751, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9174712657490132, + "acc,all": 0.9174836601307189, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7632047672731808, + "mse,all": 0.5285130718954247, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5326842837273992, + "acc,exam_id__USP_2022": 0.46938775510204084, + "acc,exam_id__UNICAMP_2018": 0.35185185185185186, + "acc,exam_id__USP_2021": 0.5576923076923077, + "acc,exam_id__UNICAMP_2024": 0.4666666666666667, + "acc,exam_id__USP_2018": 0.46296296296296297, + "acc,exam_id__USP_2024": 0.7317073170731707, + "acc,exam_id__USP_2019": 0.5, + "acc,exam_id__UNICAMP_2019": 0.5, + "acc,exam_id__UNICAMP_2021_1": 0.5, + "acc,exam_id__UNICAMP_2021_2": 0.39215686274509803, + "acc,exam_id__UNICAMP_2023": 0.6976744186046512, + "acc,exam_id__UNICAMP_2020": 0.6363636363636364, + "acc,exam_id__USP_2023": 0.6818181818181818, + "acc,exam_id__USP_2020": 0.5535714285714286, + "acc,exam_id__UNICAMP_2022": 0.5641025641025641, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6466060181945417, + "acc,exam_id__2017": 0.6206896551724138, + "acc,exam_id__2012": 0.6206896551724138, + "acc,exam_id__2022": 0.6616541353383458, + "acc,exam_id__2016": 0.6446280991735537, + "acc,exam_id__2010": 0.6153846153846154, + "acc,exam_id__2011": 0.7094017094017094, + "acc,exam_id__2014": 0.6513761467889908, + "acc,exam_id__2013": 0.6944444444444444, + "acc,exam_id__2016_2": 0.6585365853658537, + "acc,exam_id__2015": 0.6134453781512605, + "acc,exam_id__2009": 0.5826086956521739, + "acc,exam_id__2023": 0.6814814814814815 + }, + "faquad_nli": { + "f1_macro,all": 0.8241841468197617, + "acc,all": 0.8769230769230769, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7990490978163615, + "acc,all": 0.8042857142857143 + }, + "oab_exams": { + "acc,all": 0.44510250569476084, + "acc,exam_id__2013-11": 0.4625, + "acc,exam_id__2016-21": 0.4875, + "acc,exam_id__2012-06": 0.4625, + "acc,exam_id__2013-10": 0.4375, + "acc,exam_id__2012-09": 0.33766233766233766, + "acc,exam_id__2012-08": 0.475, + "acc,exam_id__2015-18": 0.4875, + "acc,exam_id__2017-24": 0.5125, + "acc,exam_id__2017-22": 0.4875, + "acc,exam_id__2016-19": 0.5512820512820513, + "acc,exam_id__2010-02": 0.52, + "acc,exam_id__2012-07": 0.3875, + "acc,exam_id__2017-23": 0.4, + "acc,exam_id__2014-15": 0.41025641025641024, + "acc,exam_id__2011-05": 0.475, + "acc,exam_id__2015-16": 0.4375, + "acc,exam_id__2016-20a": 0.4, + "acc,exam_id__2014-14": 0.4875, + "acc,exam_id__2018-25": 0.425, + "acc,exam_id__2010-01": 0.3411764705882353, + "acc,exam_id__2014-13": 0.4, + "acc,exam_id__2013-12": 
0.475, + "acc,exam_id__2016-20": 0.4125, + "acc,exam_id__2012-06a": 0.4875, + "acc,exam_id__2011-03": 0.35353535353535354, + "acc,exam_id__2011-04": 0.3625, + "acc,exam_id__2015-17": 0.5512820512820513, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7141208181486736, + "acc,all": 0.7520564042303173 + }, + "tweetsentbr": { + "f1_macro,all": 0.6666509531443612, + "acc,all": 0.7079601990049751, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "c1bec5cb82a7a561d8c7459cc9f5685f938c7f34", - "model_dtype": "torch.float16", - "model_memory_footprint": 15020376064, - "model_num_parameters": 7241748480, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1531.7455065359477, - "min_seq_length": 1508, - "max_seq_length": 1598, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1740.7455065359477, - "min_seq_length": 1717, - "max_seq_length": 1807, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1761.9262865090404, - "min_seq_length": 1385, - "max_seq_length": 2562, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1662.039188243527, - "min_seq_length": 1396, - "max_seq_length": 2660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1756.9876923076922, - "min_seq_length": 1701, - "max_seq_length": 1877, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "c1bec5cb82a7a561d8c7459cc9f5685f938c7f34", + "model_dtype": "torch.float16", + "model_memory_footprint": 15020376064, + "model_num_parameters": 7241748480, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1567.3878571428572, - "min_seq_length": 1544, - "max_seq_length": 1818, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1407.764464692483, - "min_seq_length": 1141, - "max_seq_length": 1910, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1531.7455065359477, + "min_seq_length": 1508, + "max_seq_length": 1598, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1740.7455065359477, + "min_seq_length": 1717, + "max_seq_length": 1807, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1761.9262865090404, + "min_seq_length": 1385, + "max_seq_length": 2562, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1662.039188243527, + "min_seq_length": 1396, + "max_seq_length": 
2660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1756.9876923076922, + "min_seq_length": 1701, + "max_seq_length": 1877, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1567.3878571428572, + "min_seq_length": 1544, + "max_seq_length": 1818, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1407.764464692483, + "min_seq_length": 1141, + "max_seq_length": 1910, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2068.3360752056406, + "min_seq_length": 2033, + "max_seq_length": 2107, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1814.2492537313433, + "min_seq_length": 1793, + "max_seq_length": 1909, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2068.3360752056406, - "min_seq_length": 2033, - "max_seq_length": 2107, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=BAAI/Infinity-Instruct-3M-0613-Mistral-7B,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1814.2492537313433, - "min_seq_length": 1793, - "max_seq_length": 1909, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=BAAI/Infinity-Instruct-3M-0613-Mistral-7B,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - 
null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/BAAI/Infinity-Instruct-3M-0613-Mistral-7B/results_2024-06-22T01-31-31.647844.json b/BAAI/Infinity-Instruct-3M-0613-Mistral-7B/results_2024-06-22T01-31-31.647844.json index 9253d65622941a9085d38353f55cac85722f7ce2..175c3d3f5f26d023f57a63ee6cafc9bbcd3ca947 100644 --- a/BAAI/Infinity-Instruct-3M-0613-Mistral-7B/results_2024-06-22T01-31-31.647844.json +++ b/BAAI/Infinity-Instruct-3M-0613-Mistral-7B/results_2024-06-22T01-31-31.647844.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6824901242535515, - "all_grouped_npm": 0.5328536851326985, + "all_grouped_average": 0.7010082062853393, + "all_grouped_npm": 0.5604103548228588, "all_grouped": { "enem_challenge": 0.6466060181945417, "bluex": 0.5326842837273992, @@ -45,7 +45,7 @@ "faquad_nli": 0.8241841468197617, "hatebr_offensive": 0.7990490978163615, "portuguese_hate_speech": 0.7141208181486736, - "tweetsentbr": 0.49998821485827083 + "tweetsentbr": 0.6666509531443612 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6466060181945417, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.8241841468197617, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7990490978163615, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7141208181486736, - "harness|tweetsentbr|tweetsentbr|None|25": 0.49998821485827083 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6666509531443612 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6466060181945417, @@ -150,9 +150,9 @@ "main_score": 0.7141208181486736 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.49998821485827083, + "f1_macro,all": 0.6666509531443612, "acc,all": 0.7079601990049751, - "main_score": 0.49998821485827083 + "main_score": 0.6666509531443612 } }, "config_tasks": { diff --git a/BAAI/Infinity-Instruct-3M-0625-Mistral-7B/raw_2024-07-19T01-41-09.242433/results.json b/BAAI/Infinity-Instruct-3M-0625-Mistral-7B/raw_2024-07-19T01-41-09.242433/results.json index 24239ac05f335f82e27556f5ebf43f0c9e99ef9d..377fa5fd0cb123e7e223e3a7acf71eb67c151aa4 100644 --- a/BAAI/Infinity-Instruct-3M-0625-Mistral-7B/raw_2024-07-19T01-41-09.242433/results.json +++ b/BAAI/Infinity-Instruct-3M-0625-Mistral-7B/raw_2024-07-19T01-41-09.242433/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9195035639155449, - "acc,all": 0.9195261437908496, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7748240246646378, - "mse,all": 0.5014828431372549, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5507649513212796, - "acc,exam_id__UNICAMP_2024": 0.4444444444444444, - "acc,exam_id__USP_2018": 0.5, - "acc,exam_id__USP_2022": 0.5918367346938775, - "acc,exam_id__USP_2019": 0.525, - "acc,exam_id__UNICAMP_2019": 0.5, - "acc,exam_id__USP_2024": 0.7560975609756098, - "acc,exam_id__USP_2023": 0.6363636363636364, - "acc,exam_id__UNICAMP_2022": 0.5384615384615384, - "acc,exam_id__USP_2021": 0.5769230769230769, - "acc,exam_id__UNICAMP_2020": 0.5454545454545454, - "acc,exam_id__USP_2020": 0.5535714285714286, - "acc,exam_id__UNICAMP_2021_2": 0.49019607843137253, - "acc,exam_id__UNICAMP_2018": 0.5, - "acc,exam_id__UNICAMP_2023": 0.6744186046511628, - "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6634009797060881, - "acc,exam_id__2022": 
0.6766917293233082, - "acc,exam_id__2013": 0.7129629629629629, - "acc,exam_id__2012": 0.6551724137931034, - "acc,exam_id__2010": 0.6581196581196581, - "acc,exam_id__2016_2": 0.6422764227642277, - "acc,exam_id__2023": 0.6814814814814815, - "acc,exam_id__2016": 0.6528925619834711, - "acc,exam_id__2009": 0.6086956521739131, - "acc,exam_id__2014": 0.6697247706422018, - "acc,exam_id__2017": 0.6637931034482759, - "acc,exam_id__2011": 0.7008547008547008, - "acc,exam_id__2015": 0.6386554621848739 - }, - "faquad_nli": { - "f1_macro,all": 0.8110585067106806, - "acc,all": 0.8661538461538462, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8052263390689189, - "acc,all": 0.8107142857142857 - }, - "oab_exams": { - "acc,all": 0.44419134396355353, - "acc,exam_id__2014-13": 0.425, - "acc,exam_id__2012-06": 0.45, - "acc,exam_id__2018-25": 0.4375, - "acc,exam_id__2012-09": 0.3246753246753247, - "acc,exam_id__2012-07": 0.375, - "acc,exam_id__2016-20a": 0.35, - "acc,exam_id__2011-05": 0.5375, - "acc,exam_id__2017-24": 0.45, - "acc,exam_id__2016-21": 0.4875, - "acc,exam_id__2013-10": 0.4625, - "acc,exam_id__2013-11": 0.4625, - "acc,exam_id__2017-22": 0.475, - "acc,exam_id__2010-01": 0.35294117647058826, - "acc,exam_id__2016-20": 0.45, - "acc,exam_id__2017-23": 0.4125, - "acc,exam_id__2012-08": 0.475, - "acc,exam_id__2014-14": 0.5375, - "acc,exam_id__2010-02": 0.49, - "acc,exam_id__2015-16": 0.375, - "acc,exam_id__2015-18": 0.5, - "acc,exam_id__2015-17": 0.5897435897435898, - "acc,exam_id__2011-03": 0.32323232323232326, - "acc,exam_id__2013-12": 0.5, - "acc,exam_id__2011-04": 0.3375, - "acc,exam_id__2016-19": 0.5, - "acc,exam_id__2014-15": 0.44871794871794873, - "acc,exam_id__2012-06a": 0.4875, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7276695425104325, - "acc,all": 0.7649823736780259 - }, - "tweetsentbr": { - "f1_macro,all": 0.5138720675986127, - "acc,all": 0.7189054726368159, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9195035639155449, + "acc,all": 0.9195261437908496, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7748240246646378, + "mse,all": 0.5014828431372549, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5507649513212796, + "acc,exam_id__UNICAMP_2024": 0.4444444444444444, + "acc,exam_id__USP_2018": 0.5, + "acc,exam_id__USP_2022": 0.5918367346938775, + "acc,exam_id__USP_2019": 0.525, + "acc,exam_id__UNICAMP_2019": 0.5, + "acc,exam_id__USP_2024": 0.7560975609756098, + "acc,exam_id__USP_2023": 0.6363636363636364, + "acc,exam_id__UNICAMP_2022": 0.5384615384615384, + "acc,exam_id__USP_2021": 0.5769230769230769, + "acc,exam_id__UNICAMP_2020": 0.5454545454545454, + "acc,exam_id__USP_2020": 0.5535714285714286, + "acc,exam_id__UNICAMP_2021_2": 0.49019607843137253, + "acc,exam_id__UNICAMP_2018": 0.5, + "acc,exam_id__UNICAMP_2023": 0.6744186046511628, + "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6634009797060881, + "acc,exam_id__2022": 0.6766917293233082, + "acc,exam_id__2013": 0.7129629629629629, + "acc,exam_id__2012": 0.6551724137931034, + "acc,exam_id__2010": 0.6581196581196581, + "acc,exam_id__2016_2": 0.6422764227642277, + "acc,exam_id__2023": 0.6814814814814815, + "acc,exam_id__2016": 0.6528925619834711, + "acc,exam_id__2009": 0.6086956521739131, + "acc,exam_id__2014": 0.6697247706422018, + "acc,exam_id__2017": 0.6637931034482759, + "acc,exam_id__2011": 0.7008547008547008, + "acc,exam_id__2015": 0.6386554621848739 + }, + "faquad_nli": { + "f1_macro,all": 0.8110585067106806, + "acc,all": 0.8661538461538462, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8052263390689189, + "acc,all": 0.8107142857142857 + }, + "oab_exams": { + "acc,all": 0.44419134396355353, + "acc,exam_id__2014-13": 0.425, + "acc,exam_id__2012-06": 0.45, + "acc,exam_id__2018-25": 0.4375, + "acc,exam_id__2012-09": 0.3246753246753247, + "acc,exam_id__2012-07": 0.375, + "acc,exam_id__2016-20a": 0.35, + "acc,exam_id__2011-05": 0.5375, + "acc,exam_id__2017-24": 0.45, + "acc,exam_id__2016-21": 0.4875, + "acc,exam_id__2013-10": 0.4625, + "acc,exam_id__2013-11": 0.4625, + "acc,exam_id__2017-22": 0.475, + "acc,exam_id__2010-01": 0.35294117647058826, + "acc,exam_id__2016-20": 0.45, + "acc,exam_id__2017-23": 0.4125, + "acc,exam_id__2012-08": 0.475, + "acc,exam_id__2014-14": 0.5375, + "acc,exam_id__2010-02": 0.49, + "acc,exam_id__2015-16": 0.375, + "acc,exam_id__2015-18": 0.5, + "acc,exam_id__2015-17": 0.5897435897435898, + "acc,exam_id__2011-03": 0.32323232323232326, + 
"acc,exam_id__2013-12": 0.5, + "acc,exam_id__2011-04": 0.3375, + "acc,exam_id__2016-19": 0.5, + "acc,exam_id__2014-15": 0.44871794871794873, + "acc,exam_id__2012-06a": 0.4875, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7276695425104325, + "acc,all": 0.7649823736780259 + }, + "tweetsentbr": { + "f1_macro,all": 0.6851627567981504, + "acc,all": 0.7189054726368159, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "302e3ae0bcc50dae3fb69fc1b08b518398e8c407", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 14483505152, - "model_num_parameters": 7241748480, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1531.7455065359477, - "min_seq_length": 1508, - "max_seq_length": 1598, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1740.7455065359477, - "min_seq_length": 1717, - "max_seq_length": 1807, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1761.9262865090404, - "min_seq_length": 1385, - "max_seq_length": 2562, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1662.039188243527, - "min_seq_length": 1396, - "max_seq_length": 2660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1756.9876923076922, - "min_seq_length": 1701, - "max_seq_length": 1877, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "302e3ae0bcc50dae3fb69fc1b08b518398e8c407", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 14483505152, + "model_num_parameters": 7241748480, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1567.3878571428572, - "min_seq_length": 1544, - "max_seq_length": 1818, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1407.764464692483, - "min_seq_length": 1141, - "max_seq_length": 1910, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1531.7455065359477, + "min_seq_length": 1508, + "max_seq_length": 1598, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1740.7455065359477, + "min_seq_length": 1717, + "max_seq_length": 1807, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1761.9262865090404, + "min_seq_length": 1385, + "max_seq_length": 2562, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1662.039188243527, + "min_seq_length": 1396, + 
"max_seq_length": 2660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1756.9876923076922, + "min_seq_length": 1701, + "max_seq_length": 1877, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1567.3878571428572, + "min_seq_length": 1544, + "max_seq_length": 1818, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1407.764464692483, + "min_seq_length": 1141, + "max_seq_length": 1910, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2068.3360752056406, + "min_seq_length": 2033, + "max_seq_length": 2107, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1814.2492537313433, + "min_seq_length": 1793, + "max_seq_length": 1909, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2068.3360752056406, - "min_seq_length": 2033, - "max_seq_length": 2107, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=BAAI/Infinity-Instruct-3M-0625-Mistral-7B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1814.2492537313433, - "min_seq_length": 1793, - "max_seq_length": 1909, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=BAAI/Infinity-Instruct-3M-0625-Mistral-7B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, 
- null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/BAAI/Infinity-Instruct-3M-0625-Mistral-7B/results_2024-07-19T01-41-09.242433.json b/BAAI/Infinity-Instruct-3M-0625-Mistral-7B/results_2024-07-19T01-41-09.242433.json index 830b1b5472f574a8e041dfbf6d66edb3f64f2f78..46188c6fc423ac8230abbe3015be523e46cbca68 100644 --- a/BAAI/Infinity-Instruct-3M-0625-Mistral-7B/results_2024-07-19T01-41-09.242433.json +++ b/BAAI/Infinity-Instruct-3M-0625-Mistral-7B/results_2024-07-19T01-41-09.242433.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6900568132733055, - "all_grouped_npm": 0.5432631027798649, + "all_grouped_average": 0.7090891120732541, + "all_grouped_npm": 0.5715849759940743, "all_grouped": { "enem_challenge": 0.6634009797060881, "bluex": 0.5507649513212796, @@ -45,7 +45,7 @@ "faquad_nli": 0.8110585067106806, "hatebr_offensive": 0.8052263390689189, "portuguese_hate_speech": 0.7276695425104325, - "tweetsentbr": 0.5138720675986127 + "tweetsentbr": 0.6851627567981504 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6634009797060881, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.8110585067106806, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8052263390689189, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7276695425104325, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5138720675986127 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6851627567981504 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6634009797060881, @@ -150,9 +150,9 @@ "main_score": 0.7276695425104325 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5138720675986127, + "f1_macro,all": 0.6851627567981504, "acc,all": 0.7189054726368159, - "main_score": 0.5138720675986127 + "main_score": 0.6851627567981504 } }, "config_tasks": { diff --git a/CohereForAI/c4ai-command-r-v01/raw_2024-04-17T00-36-42.568466/results.json b/CohereForAI/c4ai-command-r-v01/raw_2024-04-17T00-36-42.568466/results.json index 0c6c3b5e94114d67d9515a55daeb226dca1f59b8..ce8d4b515303e27284b3679cc50e99bea1ead602 100644 --- a/CohereForAI/c4ai-command-r-v01/raw_2024-04-17T00-36-42.568466/results.json +++ b/CohereForAI/c4ai-command-r-v01/raw_2024-04-17T00-36-42.568466/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.883132179380006, - "acc,all": 0.8831699346405228, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7210331309303998, - "mse,all": 0.6012867647058824, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.6203059805285118, - "acc,exam_id__UNICAMP_2020": 0.6, - "acc,exam_id__USP_2023": 0.75, - "acc,exam_id__UNICAMP_2024": 0.6222222222222222, - "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, - "acc,exam_id__USP_2020": 0.5357142857142857, - "acc,exam_id__UNICAMP_2018": 0.5, - "acc,exam_id__USP_2022": 0.6326530612244898, - "acc,exam_id__UNICAMP_2019": 0.64, - "acc,exam_id__UNICAMP_2021_2": 0.6078431372549019, - "acc,exam_id__USP_2019": 0.6, - "acc,exam_id__UNICAMP_2022": 0.717948717948718, - "acc,exam_id__USP_2024": 0.7804878048780488, - "acc,exam_id__UNICAMP_2023": 0.7209302325581395, - "acc,exam_id__USP_2018": 0.46296296296296297, - "acc,exam_id__USP_2021": 0.6730769230769231, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.7158852344296711, - "acc,exam_id__2009": 0.6956521739130435, - "acc,exam_id__2013": 0.6851851851851852, 
- "acc,exam_id__2010": 0.7521367521367521, - "acc,exam_id__2012": 0.75, - "acc,exam_id__2016_2": 0.6747967479674797, - "acc,exam_id__2017": 0.7241379310344828, - "acc,exam_id__2022": 0.6616541353383458, - "acc,exam_id__2023": 0.7481481481481481, - "acc,exam_id__2011": 0.7692307692307693, - "acc,exam_id__2014": 0.7431192660550459, - "acc,exam_id__2015": 0.6890756302521008, - "acc,exam_id__2016": 0.7024793388429752 - }, - "faquad_nli": { - "f1_macro,all": 0.47272296015180265, - "acc,all": 0.47384615384615386, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8222299935886227, - "acc,all": 0.8257142857142857 - }, - "oab_exams": { - "acc,all": 0.5521640091116173, - "acc,exam_id__2018-25": 0.475, - "acc,exam_id__2014-14": 0.6375, - "acc,exam_id__2013-11": 0.5375, - "acc,exam_id__2013-12": 0.5875, - "acc,exam_id__2015-16": 0.5125, - "acc,exam_id__2016-19": 0.5769230769230769, - "acc,exam_id__2015-17": 0.6923076923076923, - "acc,exam_id__2016-21": 0.4625, - "acc,exam_id__2011-03": 0.5050505050505051, - "acc,exam_id__2012-08": 0.5625, - "acc,exam_id__2012-07": 0.5375, - "acc,exam_id__2011-04": 0.4625, - "acc,exam_id__2013-10": 0.5875, - "acc,exam_id__2012-06a": 0.6375, - "acc,exam_id__2010-01": 0.4117647058823529, - "acc,exam_id__2017-23": 0.5625, - "acc,exam_id__2015-18": 0.5125, - "acc,exam_id__2012-06": 0.5375, - "acc,exam_id__2017-22": 0.6375, - "acc,exam_id__2014-15": 0.6538461538461539, - "acc,exam_id__2010-02": 0.62, - "acc,exam_id__2014-13": 0.5375, - "acc,exam_id__2016-20": 0.575, - "acc,exam_id__2011-05": 0.5875, - "acc,exam_id__2016-20a": 0.5625, - "acc,exam_id__2012-09": 0.45454545454545453, - "acc,exam_id__2017-24": 0.4875, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7102306144559665, - "acc,all": 0.7285546415981199 - }, - "tweetsentbr": { - "f1_macro,all": 0.48595613300125107, - "acc,all": 0.7114427860696517, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.883132179380006, + "acc,all": 0.8831699346405228, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7210331309303998, + "mse,all": 0.6012867647058824, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.6203059805285118, + "acc,exam_id__UNICAMP_2020": 0.6, + "acc,exam_id__USP_2023": 0.75, + "acc,exam_id__UNICAMP_2024": 0.6222222222222222, + "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, + "acc,exam_id__USP_2020": 0.5357142857142857, + "acc,exam_id__UNICAMP_2018": 0.5, + "acc,exam_id__USP_2022": 0.6326530612244898, + "acc,exam_id__UNICAMP_2019": 0.64, + "acc,exam_id__UNICAMP_2021_2": 0.6078431372549019, + "acc,exam_id__USP_2019": 0.6, + "acc,exam_id__UNICAMP_2022": 0.717948717948718, + "acc,exam_id__USP_2024": 0.7804878048780488, + "acc,exam_id__UNICAMP_2023": 0.7209302325581395, + "acc,exam_id__USP_2018": 0.46296296296296297, + "acc,exam_id__USP_2021": 0.6730769230769231, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.7158852344296711, + "acc,exam_id__2009": 0.6956521739130435, + "acc,exam_id__2013": 0.6851851851851852, + "acc,exam_id__2010": 0.7521367521367521, + "acc,exam_id__2012": 0.75, + "acc,exam_id__2016_2": 0.6747967479674797, + "acc,exam_id__2017": 0.7241379310344828, + "acc,exam_id__2022": 0.6616541353383458, + "acc,exam_id__2023": 0.7481481481481481, + "acc,exam_id__2011": 0.7692307692307693, + "acc,exam_id__2014": 0.7431192660550459, + "acc,exam_id__2015": 0.6890756302521008, + "acc,exam_id__2016": 0.7024793388429752 + }, + "faquad_nli": { + "f1_macro,all": 0.47272296015180265, + "acc,all": 0.47384615384615386, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8222299935886227, + "acc,all": 0.8257142857142857 + }, + "oab_exams": { + "acc,all": 0.5521640091116173, + "acc,exam_id__2018-25": 0.475, + "acc,exam_id__2014-14": 0.6375, + "acc,exam_id__2013-11": 0.5375, + "acc,exam_id__2013-12": 0.5875, + "acc,exam_id__2015-16": 0.5125, + "acc,exam_id__2016-19": 0.5769230769230769, + "acc,exam_id__2015-17": 0.6923076923076923, + "acc,exam_id__2016-21": 0.4625, + "acc,exam_id__2011-03": 0.5050505050505051, + "acc,exam_id__2012-08": 0.5625, + "acc,exam_id__2012-07": 0.5375, + "acc,exam_id__2011-04": 0.4625, + "acc,exam_id__2013-10": 0.5875, + "acc,exam_id__2012-06a": 0.6375, + "acc,exam_id__2010-01": 0.4117647058823529, + "acc,exam_id__2017-23": 0.5625, + "acc,exam_id__2015-18": 0.5125, + "acc,exam_id__2012-06": 0.5375, + "acc,exam_id__2017-22": 0.6375, + "acc,exam_id__2014-15": 0.6538461538461539, + "acc,exam_id__2010-02": 0.62, + "acc,exam_id__2014-13": 0.5375, + 
"acc,exam_id__2016-20": 0.575, + "acc,exam_id__2011-05": 0.5875, + "acc,exam_id__2016-20a": 0.5625, + "acc,exam_id__2012-09": 0.45454545454545453, + "acc,exam_id__2017-24": 0.4875, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7102306144559665, + "acc,all": 0.7285546415981199 + }, + "tweetsentbr": { + "f1_macro,all": 0.6479415106683347, + "acc,all": 0.7114427860696517, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "16881ccde1c68bbc7041280e6a66637bc46bfe88", - "model_dtype": "torch.float16", - "model_memory_footprint": 69961672704, - "model_num_parameters": 34980831232, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 1, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1038.3545751633987, - "min_seq_length": 1022, - "max_seq_length": 1082, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1283.3545751633987, - "min_seq_length": 1267, - "max_seq_length": 1327, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1355.8970792767732, - "min_seq_length": 1076, - "max_seq_length": 1949, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1217.5381385584324, - "min_seq_length": 1027, - "max_seq_length": 2206, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1138.0676923076924, - "min_seq_length": 1102, - "max_seq_length": 1205, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "16881ccde1c68bbc7041280e6a66637bc46bfe88", + "model_dtype": "torch.float16", + "model_memory_footprint": 69961672704, + "model_num_parameters": 34980831232, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 1, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1095.9871428571428, - "min_seq_length": 1079, - "max_seq_length": 1283, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 970.0168564920274, - "min_seq_length": 784, - "max_seq_length": 1291, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1038.3545751633987, + "min_seq_length": 1022, + "max_seq_length": 1082, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1283.3545751633987, + "min_seq_length": 1267, + "max_seq_length": 1327, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1355.8970792767732, + "min_seq_length": 1076, + "max_seq_length": 1949, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1217.5381385584324, + "min_seq_length": 1027, + "max_seq_length": 2206, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1138.0676923076924, + "min_seq_length": 1102, + "max_seq_length": 1205, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1095.9871428571428, + "min_seq_length": 1079, + "max_seq_length": 1283, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 970.0168564920274, + "min_seq_length": 784, + "max_seq_length": 1291, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1454.6169212690952, + "min_seq_length": 1427, + "max_seq_length": 1491, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1420.0830845771145, + "min_seq_length": 1404, + "max_seq_length": 1470, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1454.6169212690952, - "min_seq_length": 1427, - "max_seq_length": 1491, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=CohereForAI/c4ai-command-r-v01,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1420.0830845771145, - "min_seq_length": 1404, - "max_seq_length": 1470, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=CohereForAI/c4ai-command-r-v01,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - 
"git_hash": "0e4d6ae" + "git_hash": "0e4d6ae" } \ No newline at end of file diff --git a/CohereForAI/c4ai-command-r-v01/results_2024-04-17T00-36-42.568466.json b/CohereForAI/c4ai-command-r-v01/results_2024-04-17T00-36-42.568466.json index 162b35d8512d40479cf2e7702560f0e13d9aacc2..c15a320046cfea232bc88795843c1892e9d81158 100644 --- a/CohereForAI/c4ai-command-r-v01/results_2024-04-17T00-36-42.568466.json +++ b/CohereForAI/c4ai-command-r-v01/results_2024-04-17T00-36-42.568466.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6648511372864276, - "all_grouped_npm": 0.4887985400400404, + "all_grouped_average": 0.6828495125827703, + "all_grouped_npm": 0.5155818366119789, "all_grouped": { "enem_challenge": 0.7158852344296711, "bluex": 0.6203059805285118, @@ -45,7 +45,7 @@ "faquad_nli": 0.47272296015180265, "hatebr_offensive": 0.8222299935886227, "portuguese_hate_speech": 0.7102306144559665, - "tweetsentbr": 0.48595613300125107 + "tweetsentbr": 0.6479415106683347 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.7158852344296711, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.47272296015180265, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8222299935886227, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7102306144559665, - "harness|tweetsentbr|tweetsentbr|None|25": 0.48595613300125107 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6479415106683347 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.7158852344296711, @@ -150,9 +150,9 @@ "main_score": 0.7102306144559665 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.48595613300125107, + "f1_macro,all": 0.6479415106683347, "acc,all": 0.7114427860696517, - "main_score": 0.48595613300125107 + "main_score": 0.6479415106683347 } }, "config_tasks": { diff --git a/Columbia-NLP/LION-LLaMA-3-8b-odpo-v1.0/raw_2024-07-15T01-32-05.828202/results.json b/Columbia-NLP/LION-LLaMA-3-8b-odpo-v1.0/raw_2024-07-15T01-32-05.828202/results.json index cd1aef6451b123440fb2457d7a894219d7e2ff52..dfc397eef09683d1349f1d0388315daa8b5c3764 100644 --- a/Columbia-NLP/LION-LLaMA-3-8b-odpo-v1.0/raw_2024-07-15T01-32-05.828202/results.json +++ b/Columbia-NLP/LION-LLaMA-3-8b-odpo-v1.0/raw_2024-07-15T01-32-05.828202/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9193834267092047, - "acc,all": 0.9195261437908496, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7172104868084787, - "mse,all": 0.6943019494681872, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.48817802503477054, - "acc,exam_id__USP_2022": 0.4897959183673469, - "acc,exam_id__USP_2019": 0.475, - "acc,exam_id__UNICAMP_2021_2": 0.39215686274509803, - "acc,exam_id__UNICAMP_2022": 0.46153846153846156, - "acc,exam_id__UNICAMP_2018": 0.37037037037037035, - "acc,exam_id__UNICAMP_2021_1": 0.43478260869565216, - "acc,exam_id__UNICAMP_2020": 0.509090909090909, - "acc,exam_id__USP_2020": 0.5357142857142857, - "acc,exam_id__UNICAMP_2024": 0.5333333333333333, - "acc,exam_id__UNICAMP_2019": 0.48, - "acc,exam_id__USP_2021": 0.5384615384615384, - "acc,exam_id__UNICAMP_2023": 0.4418604651162791, - "acc,exam_id__USP_2018": 0.48148148148148145, - "acc,exam_id__USP_2024": 0.6097560975609756, - "acc,exam_id__USP_2023": 0.5909090909090909, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6368089573128062, - "acc,exam_id__2011": 0.7094017094017094, - "acc,exam_id__2015": 0.680672268907563, - "acc,exam_id__2017": 
0.5948275862068966, - "acc,exam_id__2016": 0.6198347107438017, - "acc,exam_id__2016_2": 0.5528455284552846, - "acc,exam_id__2014": 0.6697247706422018, - "acc,exam_id__2023": 0.6148148148148148, - "acc,exam_id__2013": 0.6203703703703703, - "acc,exam_id__2010": 0.6410256410256411, - "acc,exam_id__2009": 0.6782608695652174, - "acc,exam_id__2022": 0.6390977443609023, - "acc,exam_id__2012": 0.6293103448275862 - }, - "faquad_nli": { - "f1_macro,all": 0.48579447418078225, - "acc,all": 0.7630769230769231, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.5709864973474453, - "acc,all": 0.8535714285714285 - }, - "oab_exams": { - "acc,all": 0.43143507972665146, - "acc,exam_id__2012-06": 0.4125, - "acc,exam_id__2016-21": 0.325, - "acc,exam_id__2013-11": 0.375, - "acc,exam_id__2012-07": 0.45, - "acc,exam_id__2010-02": 0.46, - "acc,exam_id__2012-09": 0.4805194805194805, - "acc,exam_id__2016-19": 0.4230769230769231, - "acc,exam_id__2011-05": 0.4625, - "acc,exam_id__2014-13": 0.3875, - "acc,exam_id__2011-03": 0.3838383838383838, - "acc,exam_id__2013-10": 0.4125, - "acc,exam_id__2017-23": 0.4625, - "acc,exam_id__2015-16": 0.3125, - "acc,exam_id__2017-22": 0.5125, - "acc,exam_id__2010-01": 0.35294117647058826, - "acc,exam_id__2014-15": 0.6025641025641025, - "acc,exam_id__2016-20a": 0.4125, - "acc,exam_id__2012-08": 0.4125, - "acc,exam_id__2018-25": 0.475, - "acc,exam_id__2017-24": 0.3625, - "acc,exam_id__2014-14": 0.5375, - "acc,exam_id__2015-17": 0.5512820512820513, - "acc,exam_id__2012-06a": 0.425, - "acc,exam_id__2016-20": 0.4375, - "acc,exam_id__2013-12": 0.5, - "acc,exam_id__2015-18": 0.4375, - "acc,exam_id__2011-04": 0.3, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.475879953813593, - "acc,all": 0.7379553466509988 - }, - "tweetsentbr": { - "f1_macro,all": 0.5334393438436622, - "acc,all": 0.7338308457711443, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9193834267092047, + "acc,all": 0.9195261437908496, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7172104868084787, + "mse,all": 0.6943019494681872, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.48817802503477054, + "acc,exam_id__USP_2022": 0.4897959183673469, + "acc,exam_id__USP_2019": 0.475, + "acc,exam_id__UNICAMP_2021_2": 0.39215686274509803, + "acc,exam_id__UNICAMP_2022": 0.46153846153846156, + "acc,exam_id__UNICAMP_2018": 0.37037037037037035, + "acc,exam_id__UNICAMP_2021_1": 0.43478260869565216, + "acc,exam_id__UNICAMP_2020": 0.509090909090909, + "acc,exam_id__USP_2020": 0.5357142857142857, + "acc,exam_id__UNICAMP_2024": 0.5333333333333333, + "acc,exam_id__UNICAMP_2019": 0.48, + "acc,exam_id__USP_2021": 0.5384615384615384, + "acc,exam_id__UNICAMP_2023": 0.4418604651162791, + "acc,exam_id__USP_2018": 0.48148148148148145, + "acc,exam_id__USP_2024": 0.6097560975609756, + "acc,exam_id__USP_2023": 0.5909090909090909, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6368089573128062, + "acc,exam_id__2011": 0.7094017094017094, + "acc,exam_id__2015": 0.680672268907563, + "acc,exam_id__2017": 0.5948275862068966, + "acc,exam_id__2016": 0.6198347107438017, + "acc,exam_id__2016_2": 0.5528455284552846, + "acc,exam_id__2014": 0.6697247706422018, + "acc,exam_id__2023": 0.6148148148148148, + "acc,exam_id__2013": 0.6203703703703703, + "acc,exam_id__2010": 0.6410256410256411, + "acc,exam_id__2009": 0.6782608695652174, + "acc,exam_id__2022": 0.6390977443609023, + "acc,exam_id__2012": 0.6293103448275862 + }, + "faquad_nli": { + "f1_macro,all": 0.7286917112711735, + "acc,all": 0.7630769230769231, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8564797460211679, + "acc,all": 0.8535714285714285 + }, + "oab_exams": { + "acc,all": 0.43143507972665146, + "acc,exam_id__2012-06": 0.4125, + "acc,exam_id__2016-21": 0.325, + "acc,exam_id__2013-11": 0.375, + "acc,exam_id__2012-07": 0.45, + "acc,exam_id__2010-02": 0.46, + "acc,exam_id__2012-09": 0.4805194805194805, + "acc,exam_id__2016-19": 0.4230769230769231, + "acc,exam_id__2011-05": 0.4625, + "acc,exam_id__2014-13": 0.3875, + "acc,exam_id__2011-03": 0.3838383838383838, + "acc,exam_id__2013-10": 0.4125, + "acc,exam_id__2017-23": 0.4625, + "acc,exam_id__2015-16": 0.3125, + "acc,exam_id__2017-22": 0.5125, + "acc,exam_id__2010-01": 0.35294117647058826, + "acc,exam_id__2014-15": 0.6025641025641025, + "acc,exam_id__2016-20a": 0.4125, + "acc,exam_id__2012-08": 0.4125, + "acc,exam_id__2018-25": 0.475, + "acc,exam_id__2017-24": 0.3625, + "acc,exam_id__2014-14": 
0.5375, + "acc,exam_id__2015-17": 0.5512820512820513, + "acc,exam_id__2012-06a": 0.425, + "acc,exam_id__2016-20": 0.4375, + "acc,exam_id__2013-12": 0.5, + "acc,exam_id__2015-18": 0.4375, + "acc,exam_id__2011-04": 0.3, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7138199307203894, + "acc,all": 0.7379553466509988 + }, + "tweetsentbr": { + "f1_macro,all": 0.7112524584582163, + "acc,all": 0.7338308457711443, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "d947400efa0d824ac158c5e41bbe1dbed398d257", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 16060530688, - "model_num_parameters": 8030261248, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1317.5322712418301, - "min_seq_length": 1298, - "max_seq_length": 1381, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1508.5322712418301, - "min_seq_length": 1489, - "max_seq_length": 1572, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1483.7719054242002, - "min_seq_length": 1164, - "max_seq_length": 2133, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1411.3547935619315, - "min_seq_length": 1186, - "max_seq_length": 2339, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1446.8215384615385, - "min_seq_length": 1401, - "max_seq_length": 1543, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "d947400efa0d824ac158c5e41bbe1dbed398d257", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 16060530688, + "model_num_parameters": 8030261248, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1278.3878571428572, - "min_seq_length": 1258, - "max_seq_length": 1497, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1219.3772209567198, - "min_seq_length": 987, - "max_seq_length": 1653, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1317.5322712418301, + "min_seq_length": 1298, + "max_seq_length": 1381, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1508.5322712418301, + "min_seq_length": 1489, + "max_seq_length": 1572, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1483.7719054242002, + "min_seq_length": 1164, + "max_seq_length": 2133, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1411.3547935619315, + "min_seq_length": 1186, + "max_seq_length": 2339, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1446.8215384615385, + "min_seq_length": 1401, + "max_seq_length": 1543, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1278.3878571428572, + "min_seq_length": 1258, + "max_seq_length": 1497, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1219.3772209567198, + "min_seq_length": 987, + "max_seq_length": 1653, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1675.4195064629848, + "min_seq_length": 1645, + "max_seq_length": 1707, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1536.1537313432837, + "min_seq_length": 1519, + "max_seq_length": 1584, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1675.4195064629848, - "min_seq_length": 1645, - "max_seq_length": 1707, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Columbia-NLP/LION-LLaMA-3-8b-odpo-v1.0,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1536.1537313432837, - "min_seq_length": 1519, - "max_seq_length": 1584, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Columbia-NLP/LION-LLaMA-3-8b-odpo-v1.0,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": 
null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/Columbia-NLP/LION-LLaMA-3-8b-odpo-v1.0/results_2024-07-15T01-32-05.828202.json b/Columbia-NLP/LION-LLaMA-3-8b-odpo-v1.0/results_2024-07-15T01-32-05.828202.json index 4efdf6b95cba62a77108eed3c0090d8a2ded0807..b5da143098e97d7113ae2678ad71cab1cec4db5c 100644 --- a/Columbia-NLP/LION-LLaMA-3-8b-odpo-v1.0/results_2024-07-15T01-32-05.828202.json +++ b/Columbia-NLP/LION-LLaMA-3-8b-odpo-v1.0/results_2024-07-15T01-32-05.828202.json @@ -34,18 +34,18 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.5843462494197105, - "all_grouped_npm": 0.3533281678439427, + "all_grouped_average": 0.6892510913403176, + "all_grouped_npm": 0.546527082582236, "all_grouped": { "enem_challenge": 0.6368089573128062, "bluex": 0.48817802503477054, "oab_exams": 0.43143507972665146, "assin2_rte": 0.9193834267092047, "assin2_sts": 0.7172104868084787, - "faquad_nli": 0.48579447418078225, - "hatebr_offensive": 0.5709864973474453, - "portuguese_hate_speech": 0.475879953813593, - "tweetsentbr": 0.5334393438436622 + "faquad_nli": 0.7286917112711735, + "hatebr_offensive": 0.8564797460211679, + "portuguese_hate_speech": 0.7138199307203894, + "tweetsentbr": 0.7112524584582163 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6368089573128062, @@ -53,10 +53,10 @@ "harness|oab_exams|oab_exams|None|3": 0.43143507972665146, "harness|assin2_rte|assin2_rte|None|15": 0.9193834267092047, "harness|assin2_sts|assin2_sts|None|15": 0.7172104868084787, - "harness|faquad_nli|faquad_nli|None|15": 0.48579447418078225, - "harness|hatebr_offensive|hatebr_offensive|None|25": 0.5709864973474453, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.475879953813593, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5334393438436622 + "harness|faquad_nli|faquad_nli|None|15": 0.7286917112711735, + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8564797460211679, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7138199307203894, + "harness|tweetsentbr|tweetsentbr|None|25": 0.7112524584582163 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6368089573128062, @@ -135,24 +135,24 @@ "main_score": 0.7172104868084787 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.48579447418078225, + "f1_macro,all": 0.7286917112711735, "acc,all": 0.7630769230769231, - "main_score": 0.48579447418078225 + "main_score": 0.7286917112711735 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.5709864973474453, + "f1_macro,all": 0.8564797460211679, "acc,all": 0.8535714285714285, - "main_score": 0.5709864973474453 + "main_score": 0.8564797460211679 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.475879953813593, + "f1_macro,all": 0.7138199307203894, "acc,all": 0.7379553466509988, - "main_score": 0.475879953813593 + "main_score": 0.7138199307203894 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5334393438436622, + "f1_macro,all": 0.7112524584582163, "acc,all": 0.7338308457711443, - "main_score": 0.5334393438436622 + "main_score": 0.7112524584582163 } }, "config_tasks": { diff --git a/CultriX/NeuralMona_MoE-4x7B/raw_2024-05-26T13-29-26.736769/results.json b/CultriX/NeuralMona_MoE-4x7B/raw_2024-05-26T13-29-26.736769/results.json index 8f8b37e21cc92fced14d98e2a2abebaa44ed8f6b..960436f7eb6b86a4e1426a05f3a40f23345d5aa0 100644 --- a/CultriX/NeuralMona_MoE-4x7B/raw_2024-05-26T13-29-26.736769/results.json 
+++ b/CultriX/NeuralMona_MoE-4x7B/raw_2024-05-26T13-29-26.736769/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9244279910791389, - "acc,all": 0.9244281045751634, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7720274719342004, - "mse,all": 0.4360906862745098, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5340751043115438, - "acc,exam_id__UNICAMP_2018": 0.48148148148148145, - "acc,exam_id__UNICAMP_2023": 0.6046511627906976, - "acc,exam_id__USP_2023": 0.6136363636363636, - "acc,exam_id__UNICAMP_2024": 0.4888888888888889, - "acc,exam_id__USP_2024": 0.7317073170731707, - "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, - "acc,exam_id__USP_2020": 0.5178571428571429, - "acc,exam_id__UNICAMP_2020": 0.5636363636363636, - "acc,exam_id__UNICAMP_2022": 0.5897435897435898, - "acc,exam_id__UNICAMP_2019": 0.54, - "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, - "acc,exam_id__USP_2018": 0.5, - "acc,exam_id__USP_2021": 0.46153846153846156, - "acc,exam_id__USP_2019": 0.425, - "acc,exam_id__USP_2022": 0.46938775510204084, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6312106368089573, - "acc,exam_id__2011": 0.6410256410256411, - "acc,exam_id__2017": 0.6379310344827587, - "acc,exam_id__2015": 0.6218487394957983, - "acc,exam_id__2016": 0.5537190082644629, - "acc,exam_id__2016_2": 0.6341463414634146, - "acc,exam_id__2009": 0.6173913043478261, - "acc,exam_id__2012": 0.6379310344827587, - "acc,exam_id__2010": 0.7008547008547008, - "acc,exam_id__2013": 0.6851851851851852, - "acc,exam_id__2014": 0.6055045871559633, - "acc,exam_id__2022": 0.6090225563909775, - "acc,exam_id__2023": 0.6370370370370371 - }, - "faquad_nli": { - "f1_macro,all": 0.7694314032342202, - "acc,all": 0.816923076923077, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8409826856991804, - "acc,all": 0.8428571428571429 - }, - "oab_exams": { - "acc,all": 0.4214123006833713, - "acc,exam_id__2011-03": 0.3333333333333333, - "acc,exam_id__2014-13": 0.325, - "acc,exam_id__2013-10": 0.4125, - "acc,exam_id__2017-24": 0.4, - "acc,exam_id__2017-22": 0.5375, - "acc,exam_id__2012-06a": 0.375, - "acc,exam_id__2016-20a": 0.3375, - "acc,exam_id__2012-09": 0.38961038961038963, - "acc,exam_id__2015-16": 0.4125, - "acc,exam_id__2011-04": 0.4125, - "acc,exam_id__2012-07": 0.3625, - "acc,exam_id__2014-14": 0.5375, - "acc,exam_id__2014-15": 0.46153846153846156, - "acc,exam_id__2010-02": 0.44, - "acc,exam_id__2015-18": 0.4125, - "acc,exam_id__2016-19": 0.5128205128205128, - "acc,exam_id__2012-06": 0.4625, - "acc,exam_id__2013-12": 0.425, - "acc,exam_id__2011-05": 0.4875, - "acc,exam_id__2017-23": 0.3875, - "acc,exam_id__2013-11": 0.475, - "acc,exam_id__2016-20": 0.4, - "acc,exam_id__2016-21": 0.3625, - "acc,exam_id__2018-25": 0.4375, - "acc,exam_id__2010-01": 0.3764705882352941, - "acc,exam_id__2012-08": 0.4125, - "acc,exam_id__2015-17": 0.5128205128205128, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6819724557061289, - "acc,all": 0.7132784958871915 - }, - "tweetsentbr": { - "f1_macro,all": 0.4835212271941399, - "acc,all": 0.6955223880597015, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: 
{{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9244279910791389, + "acc,all": 0.9244281045751634, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7720274719342004, + "mse,all": 0.4360906862745098, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5340751043115438, + "acc,exam_id__UNICAMP_2018": 0.48148148148148145, + "acc,exam_id__UNICAMP_2023": 0.6046511627906976, + "acc,exam_id__USP_2023": 0.6136363636363636, + "acc,exam_id__UNICAMP_2024": 0.4888888888888889, + "acc,exam_id__USP_2024": 0.7317073170731707, + "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, + "acc,exam_id__USP_2020": 0.5178571428571429, + "acc,exam_id__UNICAMP_2020": 0.5636363636363636, + "acc,exam_id__UNICAMP_2022": 0.5897435897435898, + "acc,exam_id__UNICAMP_2019": 0.54, + "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, + "acc,exam_id__USP_2018": 0.5, + "acc,exam_id__USP_2021": 0.46153846153846156, + "acc,exam_id__USP_2019": 0.425, + "acc,exam_id__USP_2022": 0.46938775510204084, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6312106368089573, + "acc,exam_id__2011": 0.6410256410256411, + "acc,exam_id__2017": 0.6379310344827587, + "acc,exam_id__2015": 0.6218487394957983, + "acc,exam_id__2016": 0.5537190082644629, + "acc,exam_id__2016_2": 0.6341463414634146, + "acc,exam_id__2009": 0.6173913043478261, + "acc,exam_id__2012": 0.6379310344827587, + "acc,exam_id__2010": 0.7008547008547008, + "acc,exam_id__2013": 0.6851851851851852, + "acc,exam_id__2014": 0.6055045871559633, + "acc,exam_id__2022": 0.6090225563909775, + "acc,exam_id__2023": 0.6370370370370371 + }, + "faquad_nli": { + "f1_macro,all": 0.7694314032342202, + "acc,all": 0.816923076923077, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8409826856991804, + "acc,all": 0.8428571428571429 + }, + "oab_exams": { + "acc,all": 0.4214123006833713, + "acc,exam_id__2011-03": 0.3333333333333333, + "acc,exam_id__2014-13": 0.325, + "acc,exam_id__2013-10": 0.4125, + "acc,exam_id__2017-24": 0.4, + "acc,exam_id__2017-22": 0.5375, + "acc,exam_id__2012-06a": 0.375, + "acc,exam_id__2016-20a": 0.3375, + "acc,exam_id__2012-09": 0.38961038961038963, + "acc,exam_id__2015-16": 0.4125, + "acc,exam_id__2011-04": 0.4125, + "acc,exam_id__2012-07": 0.3625, + "acc,exam_id__2014-14": 0.5375, + "acc,exam_id__2014-15": 0.46153846153846156, + "acc,exam_id__2010-02": 0.44, + "acc,exam_id__2015-18": 0.4125, + 
"acc,exam_id__2016-19": 0.5128205128205128, + "acc,exam_id__2012-06": 0.4625, + "acc,exam_id__2013-12": 0.425, + "acc,exam_id__2011-05": 0.4875, + "acc,exam_id__2017-23": 0.3875, + "acc,exam_id__2013-11": 0.475, + "acc,exam_id__2016-20": 0.4, + "acc,exam_id__2016-21": 0.3625, + "acc,exam_id__2018-25": 0.4375, + "acc,exam_id__2010-01": 0.3764705882352941, + "acc,exam_id__2012-08": 0.4125, + "acc,exam_id__2015-17": 0.5128205128205128, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6819724557061289, + "acc,all": 0.7132784958871915 + }, + "tweetsentbr": { + "f1_macro,all": 0.6446949695921868, + "acc,all": 0.6955223880597015, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "869c5cafb3f5002a0d273621519e3f352418eded", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 48844259328, - "model_num_parameters": 24153690112, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 
1620.039188243527, - "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "869c5cafb3f5002a0d273621519e3f352418eded", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 48844259328, + "model_num_parameters": 24153690112, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=CultriX/NeuralMona_MoE-4x7B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=CultriX/NeuralMona_MoE-4x7B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, 
- "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/CultriX/NeuralMona_MoE-4x7B/results_2024-05-26T13-29-26.736769.json b/CultriX/NeuralMona_MoE-4x7B/results_2024-05-26T13-29-26.736769.json index 4962b5d65dbfa898566224e39a38468db3528a49..ecb9d1e4a9b5fd439dae22b6385c47541b4ea116 100644 --- a/CultriX/NeuralMona_MoE-4x7B/results_2024-05-26T13-29-26.736769.json +++ b/CultriX/NeuralMona_MoE-4x7B/results_2024-05-26T13-29-26.736769.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6732290307389868, - "all_grouped_npm": 0.5184879950941655, + "all_grouped_average": 0.6911372243387697, + "all_grouped_npm": 0.5451370927128902, "all_grouped": { "enem_challenge": 0.6312106368089573, "bluex": 0.5340751043115438, @@ -45,7 +45,7 @@ "faquad_nli": 0.7694314032342202, "hatebr_offensive": 0.8409826856991804, "portuguese_hate_speech": 0.6819724557061289, - "tweetsentbr": 0.4835212271941399 + "tweetsentbr": 0.6446949695921868 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6312106368089573, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7694314032342202, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8409826856991804, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6819724557061289, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4835212271941399 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6446949695921868 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6312106368089573, @@ -150,9 +150,9 @@ "main_score": 0.6819724557061289 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4835212271941399, + "f1_macro,all": 0.6446949695921868, "acc,all": 0.6955223880597015, - "main_score": 0.4835212271941399 + "main_score": 0.6446949695921868 } }, "config_tasks": { diff --git a/Danielbrdz/Barcenas-14b-Phi-3-medium-ORPO/raw_2024-08-08T02-43-35.640819/results.json b/Danielbrdz/Barcenas-14b-Phi-3-medium-ORPO/raw_2024-08-08T02-43-35.640819/results.json index 9f3d208afd74ab7315e4e0f2379d50a4c98247d2..327e451e75cbb076c54b26f40ca8270d5f055c65 100644 --- a/Danielbrdz/Barcenas-14b-Phi-3-medium-ORPO/raw_2024-08-08T02-43-35.640819/results.json +++ b/Danielbrdz/Barcenas-14b-Phi-3-medium-ORPO/raw_2024-08-08T02-43-35.640819/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9259642329554806, - "acc,all": 0.926062091503268, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.714480317302389, - "mse,all": 0.9154340392156861, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.6578581363004172, - "acc,exam_id__USP_2018": 0.5370370370370371, - "acc,exam_id__USP_2020": 0.6785714285714286, - "acc,exam_id__UNICAMP_2020": 0.6909090909090909, - "acc,exam_id__UNICAMP_2019": 0.7, - "acc,exam_id__UNICAMP_2022": 0.7948717948717948, - "acc,exam_id__USP_2021": 0.6538461538461539, - "acc,exam_id__USP_2019": 0.625, - "acc,exam_id__USP_2022": 0.673469387755102, - "acc,exam_id__UNICAMP_2024": 0.6222222222222222, - "acc,exam_id__USP_2023": 0.75, - "acc,exam_id__UNICAMP_2018": 0.5370370370370371, - "acc,exam_id__UNICAMP_2021_1": 0.5869565217391305, - "acc,exam_id__UNICAMP_2023": 0.7209302325581395, - "acc,exam_id__USP_2024": 0.8048780487804879, - "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.7319804058782365, - "acc,exam_id__2016_2": 0.7317073170731707, - "acc,exam_id__2016": 0.6859504132231405, - "acc,exam_id__2014": 0.7614678899082569, - 
"acc,exam_id__2011": 0.8205128205128205, - "acc,exam_id__2015": 0.7142857142857143, - "acc,exam_id__2012": 0.7413793103448276, - "acc,exam_id__2013": 0.6481481481481481, - "acc,exam_id__2010": 0.7435897435897436, - "acc,exam_id__2017": 0.7672413793103449, - "acc,exam_id__2009": 0.7043478260869566, - "acc,exam_id__2023": 0.7777777777777778, - "acc,exam_id__2022": 0.6842105263157895 - }, - "faquad_nli": { - "f1_macro,all": 0.6906170752324599, - "acc,all": 0.7184615384615385, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8460180802244769, - "acc,all": 0.8478571428571429 - }, - "oab_exams": { - "acc,all": 0.510250569476082, - "acc,exam_id__2013-10": 0.525, - "acc,exam_id__2012-09": 0.45454545454545453, - "acc,exam_id__2010-02": 0.58, - "acc,exam_id__2015-18": 0.5, - "acc,exam_id__2018-25": 0.425, - "acc,exam_id__2015-17": 0.5769230769230769, - "acc,exam_id__2011-03": 0.47474747474747475, - "acc,exam_id__2016-20": 0.5125, - "acc,exam_id__2011-04": 0.4125, - "acc,exam_id__2012-06a": 0.5625, - "acc,exam_id__2012-08": 0.5, - "acc,exam_id__2011-05": 0.5625, - "acc,exam_id__2014-15": 0.5512820512820513, - "acc,exam_id__2012-06": 0.525, - "acc,exam_id__2013-12": 0.5625, - "acc,exam_id__2010-01": 0.38823529411764707, - "acc,exam_id__2012-07": 0.575, - "acc,exam_id__2016-19": 0.5128205128205128, - "acc,exam_id__2016-21": 0.4375, - "acc,exam_id__2017-23": 0.4875, - "acc,exam_id__2017-24": 0.55, - "acc,exam_id__2016-20a": 0.45, - "acc,exam_id__2015-16": 0.55, - "acc,exam_id__2013-11": 0.475, - "acc,exam_id__2017-22": 0.55, - "acc,exam_id__2014-14": 0.525, - "acc,exam_id__2014-13": 0.55, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7355214633181597, - "acc,all": 0.7802585193889542 - }, - "tweetsentbr": { - "f1_macro,all": 0.5025508930131336, - "acc,all": 0.7069651741293532, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9259642329554806, + "acc,all": 0.926062091503268, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.714480317302389, + "mse,all": 0.9154340392156861, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.6578581363004172, + "acc,exam_id__USP_2018": 0.5370370370370371, + "acc,exam_id__USP_2020": 0.6785714285714286, + "acc,exam_id__UNICAMP_2020": 0.6909090909090909, + "acc,exam_id__UNICAMP_2019": 0.7, + "acc,exam_id__UNICAMP_2022": 0.7948717948717948, + "acc,exam_id__USP_2021": 0.6538461538461539, + "acc,exam_id__USP_2019": 0.625, + "acc,exam_id__USP_2022": 0.673469387755102, + "acc,exam_id__UNICAMP_2024": 0.6222222222222222, + "acc,exam_id__USP_2023": 0.75, + "acc,exam_id__UNICAMP_2018": 0.5370370370370371, + "acc,exam_id__UNICAMP_2021_1": 0.5869565217391305, + "acc,exam_id__UNICAMP_2023": 0.7209302325581395, + "acc,exam_id__USP_2024": 0.8048780487804879, + "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.7319804058782365, + "acc,exam_id__2016_2": 0.7317073170731707, + "acc,exam_id__2016": 0.6859504132231405, + "acc,exam_id__2014": 0.7614678899082569, + "acc,exam_id__2011": 0.8205128205128205, + "acc,exam_id__2015": 0.7142857142857143, + "acc,exam_id__2012": 0.7413793103448276, + "acc,exam_id__2013": 0.6481481481481481, + "acc,exam_id__2010": 0.7435897435897436, + "acc,exam_id__2017": 0.7672413793103449, + "acc,exam_id__2009": 0.7043478260869566, + "acc,exam_id__2023": 0.7777777777777778, + "acc,exam_id__2022": 0.6842105263157895 + }, + "faquad_nli": { + "f1_macro,all": 0.6906170752324599, + "acc,all": 0.7184615384615385, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8460180802244769, + "acc,all": 0.8478571428571429 + }, + "oab_exams": { + "acc,all": 0.510250569476082, + "acc,exam_id__2013-10": 0.525, + "acc,exam_id__2012-09": 0.45454545454545453, + "acc,exam_id__2010-02": 0.58, + "acc,exam_id__2015-18": 0.5, + "acc,exam_id__2018-25": 0.425, + "acc,exam_id__2015-17": 0.5769230769230769, + "acc,exam_id__2011-03": 0.47474747474747475, + "acc,exam_id__2016-20": 0.5125, + "acc,exam_id__2011-04": 0.4125, + "acc,exam_id__2012-06a": 0.5625, + "acc,exam_id__2012-08": 0.5, + "acc,exam_id__2011-05": 0.5625, + "acc,exam_id__2014-15": 0.5512820512820513, + "acc,exam_id__2012-06": 0.525, + "acc,exam_id__2013-12": 0.5625, + "acc,exam_id__2010-01": 0.38823529411764707, + "acc,exam_id__2012-07": 0.575, + "acc,exam_id__2016-19": 0.5128205128205128, + "acc,exam_id__2016-21": 0.4375, + "acc,exam_id__2017-23": 0.4875, + "acc,exam_id__2017-24": 0.55, + 
"acc,exam_id__2016-20a": 0.45, + "acc,exam_id__2015-16": 0.55, + "acc,exam_id__2013-11": 0.475, + "acc,exam_id__2017-22": 0.55, + "acc,exam_id__2014-14": 0.525, + "acc,exam_id__2014-13": 0.55, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7355214633181597, + "acc,all": 0.7802585193889542 + }, + "tweetsentbr": { + "f1_macro,all": 0.6700678573508446, + "acc,all": 0.7069651741293532, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "b749dbcb19901b8fd0e9f38c923a24533569f895", - "model_dtype": "torch.float16", - "model_memory_footprint": 27920486400, - "model_num_parameters": 13960238080, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1268.9889705882354, - "min_seq_length": 1246, - "max_seq_length": 1335, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1499.9889705882354, - "min_seq_length": 1477, - "max_seq_length": 1566, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1609.7426981919332, - "min_seq_length": 1243, - "max_seq_length": 2369, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1488.9881035689293, - "min_seq_length": 1236, - "max_seq_length": 2528, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1478.1184615384616, - "min_seq_length": 1426, - "max_seq_length": 1585, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "b749dbcb19901b8fd0e9f38c923a24533569f895", + "model_dtype": "torch.float16", + "model_memory_footprint": 27920486400, + "model_num_parameters": 13960238080, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1262.9178571428572, - "min_seq_length": 1239, - "max_seq_length": 1509, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1258.4145785876992, - "min_seq_length": 1003, - "max_seq_length": 1740, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1268.9889705882354, + "min_seq_length": 1246, + "max_seq_length": 1335, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1499.9889705882354, + "min_seq_length": 1477, + "max_seq_length": 1566, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1609.7426981919332, + "min_seq_length": 1243, + "max_seq_length": 2369, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1488.9881035689293, + "min_seq_length": 1236, + "max_seq_length": 2528, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1478.1184615384616, + "min_seq_length": 1426, + "max_seq_length": 1585, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1262.9178571428572, + "min_seq_length": 1239, + "max_seq_length": 1509, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1258.4145785876992, + "min_seq_length": 1003, + "max_seq_length": 1740, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1751.801410105758, + "min_seq_length": 1717, + "max_seq_length": 1795, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1518.6845771144278, + "min_seq_length": 1497, + "max_seq_length": 1636, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1751.801410105758, - "min_seq_length": 1717, - "max_seq_length": 1795, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Danielbrdz/Barcenas-14b-Phi-3-medium-ORPO,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1518.6845771144278, - "min_seq_length": 1497, - "max_seq_length": 1636, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Danielbrdz/Barcenas-14b-Phi-3-medium-ORPO,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": 
null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/Danielbrdz/Barcenas-14b-Phi-3-medium-ORPO/results_2024-08-08T02-43-35.640819.json b/Danielbrdz/Barcenas-14b-Phi-3-medium-ORPO/results_2024-08-08T02-43-35.640819.json index bf5ba6dfc3de934bb0c3302c61f99b69c5693c6e..4db50445fb989761c8fadcc60b32a524d8fa80e9 100644 --- a/Danielbrdz/Barcenas-14b-Phi-3-medium-ORPO/results_2024-08-08T02-43-35.640819.json +++ b/Danielbrdz/Barcenas-14b-Phi-3-medium-ORPO/results_2024-08-08T02-43-35.640819.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.7016934637445373, - "all_grouped_npm": 0.5569268035545414, + "all_grouped_average": 0.7203064597820608, + "all_grouped_npm": 0.5846247143246655, "all_grouped": { "enem_challenge": 0.7319804058782365, "bluex": 0.6578581363004172, @@ -45,7 +45,7 @@ "faquad_nli": 0.6906170752324599, "hatebr_offensive": 0.8460180802244769, "portuguese_hate_speech": 0.7355214633181597, - "tweetsentbr": 0.5025508930131336 + "tweetsentbr": 0.6700678573508446 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.7319804058782365, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.6906170752324599, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8460180802244769, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7355214633181597, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5025508930131336 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6700678573508446 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.7319804058782365, @@ -150,9 +150,9 @@ "main_score": 0.7355214633181597 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5025508930131336, + "f1_macro,all": 0.6700678573508446, "acc,all": 0.7069651741293532, - "main_score": 0.5025508930131336 + "main_score": 0.6700678573508446 } }, "config_tasks": { diff --git a/Danielbrdz/Barcenas-Llama3-8b-ORPO/raw_2024-05-18T00-12-52.690138/results.json b/Danielbrdz/Barcenas-Llama3-8b-ORPO/raw_2024-05-18T00-12-52.690138/results.json index c56faa0955f6325419985253493e0569b86eedda..e220fb78a1440d3922897a7967bdf0504d644fbc 100644 --- a/Danielbrdz/Barcenas-Llama3-8b-ORPO/raw_2024-05-18T00-12-52.690138/results.json +++ b/Danielbrdz/Barcenas-Llama3-8b-ORPO/raw_2024-05-18T00-12-52.690138/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9178150146340144, - "acc,all": 0.9178921568627451, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7260402501200387, - "mse,all": 0.6636315359477125, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5827538247566064, - "acc,exam_id__UNICAMP_2024": 0.7111111111111111, - "acc,exam_id__USP_2019": 0.6, - "acc,exam_id__UNICAMP_2019": 0.6, - "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, - "acc,exam_id__USP_2021": 0.5961538461538461, - "acc,exam_id__UNICAMP_2020": 0.5272727272727272, - "acc,exam_id__UNICAMP_2022": 0.6923076923076923, - "acc,exam_id__USP_2018": 0.5, - "acc,exam_id__USP_2022": 0.5510204081632653, - "acc,exam_id__UNICAMP_2018": 0.42592592592592593, - "acc,exam_id__UNICAMP_2023": 0.6511627906976745, - "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, - "acc,exam_id__USP_2020": 0.5535714285714286, - "acc,exam_id__USP_2024": 0.6829268292682927, - "acc,exam_id__USP_2023": 0.6818181818181818, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.7102869139258222, - "acc,exam_id__2012": 0.7155172413793104, - "acc,exam_id__2017": 0.6896551724137931, - 
"acc,exam_id__2013": 0.6851851851851852, - "acc,exam_id__2016": 0.7024793388429752, - "acc,exam_id__2011": 0.7264957264957265, - "acc,exam_id__2015": 0.7310924369747899, - "acc,exam_id__2022": 0.6766917293233082, - "acc,exam_id__2014": 0.7155963302752294, - "acc,exam_id__2010": 0.7435897435897436, - "acc,exam_id__2009": 0.7304347826086957, - "acc,exam_id__2016_2": 0.6504065040650406, - "acc,exam_id__2023": 0.7555555555555555 - }, - "faquad_nli": { - "f1_macro,all": 0.7308849598805747, - "acc,all": 0.7815384615384615, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8698828946051447, - "acc,all": 0.87 - }, - "oab_exams": { - "acc,all": 0.508883826879271, - "acc,exam_id__2014-15": 0.6282051282051282, - "acc,exam_id__2012-07": 0.4625, - "acc,exam_id__2016-20a": 0.4375, - "acc,exam_id__2015-16": 0.5, - "acc,exam_id__2016-21": 0.4, - "acc,exam_id__2013-10": 0.475, - "acc,exam_id__2014-13": 0.425, - "acc,exam_id__2010-02": 0.52, - "acc,exam_id__2012-06": 0.5125, - "acc,exam_id__2018-25": 0.5125, - "acc,exam_id__2011-04": 0.5125, - "acc,exam_id__2012-08": 0.5125, - "acc,exam_id__2015-18": 0.5, - "acc,exam_id__2011-05": 0.4625, - "acc,exam_id__2012-09": 0.4935064935064935, - "acc,exam_id__2017-24": 0.4625, - "acc,exam_id__2012-06a": 0.5375, - "acc,exam_id__2016-20": 0.5625, - "acc,exam_id__2013-12": 0.575, - "acc,exam_id__2016-19": 0.5128205128205128, - "acc,exam_id__2014-14": 0.575, - "acc,exam_id__2017-22": 0.6, - "acc,exam_id__2010-01": 0.4, - "acc,exam_id__2011-03": 0.48484848484848486, - "acc,exam_id__2015-17": 0.6410256410256411, - "acc,exam_id__2017-23": 0.525, - "acc,exam_id__2013-11": 0.525, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.5958643988009942, - "acc,all": 0.5969447708578144 - }, - "tweetsentbr": { - "f1_macro,all": 0.4996436497852127, - "acc,all": 0.7203980099502487, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9178150146340144, + "acc,all": 0.9178921568627451, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7260402501200387, + "mse,all": 0.6636315359477125, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5827538247566064, + "acc,exam_id__UNICAMP_2024": 0.7111111111111111, + "acc,exam_id__USP_2019": 0.6, + "acc,exam_id__UNICAMP_2019": 0.6, + "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, + "acc,exam_id__USP_2021": 0.5961538461538461, + "acc,exam_id__UNICAMP_2020": 0.5272727272727272, + "acc,exam_id__UNICAMP_2022": 0.6923076923076923, + "acc,exam_id__USP_2018": 0.5, + "acc,exam_id__USP_2022": 0.5510204081632653, + "acc,exam_id__UNICAMP_2018": 0.42592592592592593, + "acc,exam_id__UNICAMP_2023": 0.6511627906976745, + "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, + "acc,exam_id__USP_2020": 0.5535714285714286, + "acc,exam_id__USP_2024": 0.6829268292682927, + "acc,exam_id__USP_2023": 0.6818181818181818, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.7102869139258222, + "acc,exam_id__2012": 0.7155172413793104, + "acc,exam_id__2017": 0.6896551724137931, + "acc,exam_id__2013": 0.6851851851851852, + "acc,exam_id__2016": 0.7024793388429752, + "acc,exam_id__2011": 0.7264957264957265, + "acc,exam_id__2015": 0.7310924369747899, + "acc,exam_id__2022": 0.6766917293233082, + "acc,exam_id__2014": 0.7155963302752294, + "acc,exam_id__2010": 0.7435897435897436, + "acc,exam_id__2009": 0.7304347826086957, + "acc,exam_id__2016_2": 0.6504065040650406, + "acc,exam_id__2023": 0.7555555555555555 + }, + "faquad_nli": { + "f1_macro,all": 0.7308849598805747, + "acc,all": 0.7815384615384615, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8698828946051447, + "acc,all": 0.87 + }, + "oab_exams": { + "acc,all": 0.508883826879271, + "acc,exam_id__2014-15": 0.6282051282051282, + "acc,exam_id__2012-07": 0.4625, + "acc,exam_id__2016-20a": 0.4375, + "acc,exam_id__2015-16": 0.5, + "acc,exam_id__2016-21": 0.4, + "acc,exam_id__2013-10": 0.475, + "acc,exam_id__2014-13": 0.425, + "acc,exam_id__2010-02": 0.52, + "acc,exam_id__2012-06": 0.5125, + "acc,exam_id__2018-25": 0.5125, + "acc,exam_id__2011-04": 0.5125, + "acc,exam_id__2012-08": 0.5125, + "acc,exam_id__2015-18": 0.5, + "acc,exam_id__2011-05": 0.4625, + "acc,exam_id__2012-09": 0.4935064935064935, + "acc,exam_id__2017-24": 0.4625, + "acc,exam_id__2012-06a": 0.5375, + "acc,exam_id__2016-20": 0.5625, + "acc,exam_id__2013-12": 0.575, + "acc,exam_id__2016-19": 0.5128205128205128, + "acc,exam_id__2014-14": 0.575, + "acc,exam_id__2017-22": 0.6, + "acc,exam_id__2010-01": 0.4, + 
"acc,exam_id__2011-03": 0.48484848484848486, + "acc,exam_id__2015-17": 0.6410256410256411, + "acc,exam_id__2017-23": 0.525, + "acc,exam_id__2013-11": 0.525, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.5958643988009942, + "acc,all": 0.5969447708578144 + }, + "tweetsentbr": { + "f1_macro,all": 0.6661915330469502, + "acc,all": 0.7203980099502487, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "66c848c4526d3db1ec41468c0f73ac4448c6abe9", - "model_dtype": "torch.float16", - "model_memory_footprint": 16194748416, - "model_num_parameters": 8030261248, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1318.5322712418301, - "min_seq_length": 1299, - "max_seq_length": 1382, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1509.5322712418301, - "min_seq_length": 1490, - "max_seq_length": 1573, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1484.7719054242002, - "min_seq_length": 1165, - "max_seq_length": 2134, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1412.3547935619315, - "min_seq_length": 1187, - "max_seq_length": 2340, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1447.8215384615385, - "min_seq_length": 1402, - "max_seq_length": 1544, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "66c848c4526d3db1ec41468c0f73ac4448c6abe9", + "model_dtype": "torch.float16", + "model_memory_footprint": 16194748416, + "model_num_parameters": 8030261248, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1279.3878571428572, - "min_seq_length": 1259, - "max_seq_length": 1498, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1220.3772209567198, - "min_seq_length": 988, - "max_seq_length": 1654, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1318.5322712418301, + "min_seq_length": 1299, + "max_seq_length": 1382, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1509.5322712418301, + "min_seq_length": 1490, + "max_seq_length": 1573, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1484.7719054242002, + "min_seq_length": 1165, + "max_seq_length": 2134, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1412.3547935619315, + "min_seq_length": 1187, + "max_seq_length": 2340, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1447.8215384615385, + "min_seq_length": 1402, + "max_seq_length": 1544, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1279.3878571428572, + "min_seq_length": 1259, + "max_seq_length": 1498, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1220.3772209567198, + "min_seq_length": 988, + "max_seq_length": 1654, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1676.4195064629848, + "min_seq_length": 1646, + "max_seq_length": 1708, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1537.1537313432837, + "min_seq_length": 1520, + "max_seq_length": 1585, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1676.4195064629848, - "min_seq_length": 1646, - "max_seq_length": 1708, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Danielbrdz/Barcenas-Llama3-8b-ORPO,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1537.1537313432837, - "min_seq_length": 1520, - "max_seq_length": 1585, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Danielbrdz/Barcenas-Llama3-8b-ORPO,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - 
"git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/Danielbrdz/Barcenas-Llama3-8b-ORPO/results_2024-05-18T00-12-52.690138.json b/Danielbrdz/Barcenas-Llama3-8b-ORPO/results_2024-05-18T00-12-52.690138.json index 8d4ec9a45ebcc12889d780c452ee50093eccb717..df12eba30d583ccae9e314b440baafc01a56550c 100644 --- a/Danielbrdz/Barcenas-Llama3-8b-ORPO/results_2024-05-18T00-12-52.690138.json +++ b/Danielbrdz/Barcenas-Llama3-8b-ORPO/results_2024-05-18T00-12-52.690138.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6824506370430754, - "all_grouped_npm": 0.525680465043327, + "all_grouped_average": 0.7009559574054907, + "all_grouped_npm": 0.553218144154064, "all_grouped": { "enem_challenge": 0.7102869139258222, "bluex": 0.5827538247566064, @@ -45,7 +45,7 @@ "faquad_nli": 0.7308849598805747, "hatebr_offensive": 0.8698828946051447, "portuguese_hate_speech": 0.5958643988009942, - "tweetsentbr": 0.4996436497852127 + "tweetsentbr": 0.6661915330469502 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.7102869139258222, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7308849598805747, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8698828946051447, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.5958643988009942, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4996436497852127 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6661915330469502 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.7102869139258222, @@ -150,9 +150,9 @@ "main_score": 0.5958643988009942 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4996436497852127, + "f1_macro,all": 0.6661915330469502, "acc,all": 0.7203980099502487, - "main_score": 0.4996436497852127 + "main_score": 0.6661915330469502 } }, "config_tasks": { diff --git a/DeepMount00/Llama-3-8b-Ita/raw_2024-05-19T23-04-56.757278/results.json b/DeepMount00/Llama-3-8b-Ita/raw_2024-05-19T23-04-56.757278/results.json index e0738e8307e544b2f50a242b442dbd147809e815..fae7a7a1a01fbeca2e6ccda681ccc4de837b670d 100644 --- a/DeepMount00/Llama-3-8b-Ita/raw_2024-05-19T23-04-56.757278/results.json +++ b/DeepMount00/Llama-3-8b-Ita/raw_2024-05-19T23-04-56.757278/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9174373136272109, - "acc,all": 0.9174836601307189, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7274385305362817, - "mse,all": 0.7363439542483661, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.588317107093185, - "acc,exam_id__UNICAMP_2021_1": 0.6086956521739131, - "acc,exam_id__USP_2024": 0.6829268292682927, - "acc,exam_id__UNICAMP_2019": 0.62, - "acc,exam_id__UNICAMP_2023": 0.627906976744186, - "acc,exam_id__USP_2018": 0.4444444444444444, - "acc,exam_id__USP_2023": 0.6818181818181818, - "acc,exam_id__UNICAMP_2018": 0.4444444444444444, - "acc,exam_id__USP_2021": 0.5961538461538461, - "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373, - "acc,exam_id__UNICAMP_2024": 0.6444444444444445, - "acc,exam_id__USP_2019": 0.625, - "acc,exam_id__USP_2020": 0.5714285714285714, - "acc,exam_id__UNICAMP_2022": 0.717948717948718, - "acc,exam_id__UNICAMP_2020": 0.5818181818181818, - "acc,exam_id__USP_2022": 0.5306122448979592, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.7116864940517844, - "acc,exam_id__2014": 0.7247706422018348, - "acc,exam_id__2017": 0.7068965517241379, - "acc,exam_id__2023": 0.762962962962963, - "acc,exam_id__2012": 
0.7327586206896551, - "acc,exam_id__2022": 0.6616541353383458, - "acc,exam_id__2011": 0.7350427350427351, - "acc,exam_id__2016_2": 0.6585365853658537, - "acc,exam_id__2009": 0.7478260869565218, - "acc,exam_id__2010": 0.7094017094017094, - "acc,exam_id__2016": 0.6942148760330579, - "acc,exam_id__2015": 0.7142857142857143, - "acc,exam_id__2013": 0.6944444444444444 - }, - "faquad_nli": { - "f1_macro,all": 0.7530594601709288, - "acc,all": 0.8046153846153846, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8684530363041023, - "acc,all": 0.8685714285714285 - }, - "oab_exams": { - "acc,all": 0.5084282460136674, - "acc,exam_id__2011-05": 0.45, - "acc,exam_id__2015-16": 0.4625, - "acc,exam_id__2011-03": 0.5050505050505051, - "acc,exam_id__2012-09": 0.5324675324675324, - "acc,exam_id__2016-20": 0.575, - "acc,exam_id__2014-15": 0.5641025641025641, - "acc,exam_id__2013-12": 0.55, - "acc,exam_id__2012-06": 0.5125, - "acc,exam_id__2016-19": 0.5256410256410257, - "acc,exam_id__2016-21": 0.3875, - "acc,exam_id__2014-13": 0.425, - "acc,exam_id__2013-10": 0.4375, - "acc,exam_id__2015-18": 0.5125, - "acc,exam_id__2018-25": 0.525, - "acc,exam_id__2017-23": 0.5125, - "acc,exam_id__2017-22": 0.575, - "acc,exam_id__2017-24": 0.475, - "acc,exam_id__2010-01": 0.3764705882352941, - "acc,exam_id__2014-14": 0.625, - "acc,exam_id__2012-06a": 0.55, - "acc,exam_id__2012-08": 0.5125, - "acc,exam_id__2016-20a": 0.45, - "acc,exam_id__2012-07": 0.475, - "acc,exam_id__2010-02": 0.55, - "acc,exam_id__2015-17": 0.6410256410256411, - "acc,exam_id__2011-04": 0.5125, - "acc,exam_id__2013-11": 0.5125, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6102181878747501, - "acc,all": 0.6122209165687427 - }, - "tweetsentbr": { - "f1_macro,all": 0.5047785208818167, - "acc,all": 0.7223880597014926, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9174373136272109, + "acc,all": 0.9174836601307189, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7274385305362817, + "mse,all": 0.7363439542483661, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.588317107093185, + "acc,exam_id__UNICAMP_2021_1": 0.6086956521739131, + "acc,exam_id__USP_2024": 0.6829268292682927, + "acc,exam_id__UNICAMP_2019": 0.62, + "acc,exam_id__UNICAMP_2023": 0.627906976744186, + "acc,exam_id__USP_2018": 0.4444444444444444, + "acc,exam_id__USP_2023": 0.6818181818181818, + "acc,exam_id__UNICAMP_2018": 0.4444444444444444, + "acc,exam_id__USP_2021": 0.5961538461538461, + "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373, + "acc,exam_id__UNICAMP_2024": 0.6444444444444445, + "acc,exam_id__USP_2019": 0.625, + "acc,exam_id__USP_2020": 0.5714285714285714, + "acc,exam_id__UNICAMP_2022": 0.717948717948718, + "acc,exam_id__UNICAMP_2020": 0.5818181818181818, + "acc,exam_id__USP_2022": 0.5306122448979592, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.7116864940517844, + "acc,exam_id__2014": 0.7247706422018348, + "acc,exam_id__2017": 0.7068965517241379, + "acc,exam_id__2023": 0.762962962962963, + "acc,exam_id__2012": 0.7327586206896551, + "acc,exam_id__2022": 0.6616541353383458, + "acc,exam_id__2011": 0.7350427350427351, + "acc,exam_id__2016_2": 0.6585365853658537, + "acc,exam_id__2009": 0.7478260869565218, + "acc,exam_id__2010": 0.7094017094017094, + "acc,exam_id__2016": 0.6942148760330579, + "acc,exam_id__2015": 0.7142857142857143, + "acc,exam_id__2013": 0.6944444444444444 + }, + "faquad_nli": { + "f1_macro,all": 0.7530594601709288, + "acc,all": 0.8046153846153846, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8684530363041023, + "acc,all": 0.8685714285714285 + }, + "oab_exams": { + "acc,all": 0.5084282460136674, + "acc,exam_id__2011-05": 0.45, + "acc,exam_id__2015-16": 0.4625, + "acc,exam_id__2011-03": 0.5050505050505051, + "acc,exam_id__2012-09": 0.5324675324675324, + "acc,exam_id__2016-20": 0.575, + "acc,exam_id__2014-15": 0.5641025641025641, + "acc,exam_id__2013-12": 0.55, + "acc,exam_id__2012-06": 0.5125, + "acc,exam_id__2016-19": 0.5256410256410257, + "acc,exam_id__2016-21": 0.3875, + "acc,exam_id__2014-13": 0.425, + "acc,exam_id__2013-10": 0.4375, + "acc,exam_id__2015-18": 0.5125, + "acc,exam_id__2018-25": 0.525, + "acc,exam_id__2017-23": 0.5125, + "acc,exam_id__2017-22": 0.575, + "acc,exam_id__2017-24": 0.475, + "acc,exam_id__2010-01": 0.3764705882352941, + "acc,exam_id__2014-14": 0.625, + "acc,exam_id__2012-06a": 0.55, + "acc,exam_id__2012-08": 0.5125, + 
"acc,exam_id__2016-20a": 0.45, + "acc,exam_id__2012-07": 0.475, + "acc,exam_id__2010-02": 0.55, + "acc,exam_id__2015-17": 0.6410256410256411, + "acc,exam_id__2011-04": 0.5125, + "acc,exam_id__2013-11": 0.5125, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6102181878747501, + "acc,all": 0.6122209165687427 + }, + "tweetsentbr": { + "f1_macro,all": 0.6730380278424222, + "acc,all": 0.7223880597014926, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "c399bd706c749788d260ed5f47c3c5c3190f37d9", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 16194748416, - "model_num_parameters": 8030261248, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 4, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1318.5322712418301, - "min_seq_length": 1299, - "max_seq_length": 1382, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1509.5322712418301, - "min_seq_length": 1490, - "max_seq_length": 1573, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1484.7719054242002, - "min_seq_length": 1165, - "max_seq_length": 2134, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1412.3547935619315, - "min_seq_length": 1187, - "max_seq_length": 2340, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1447.8215384615385, - "min_seq_length": 1402, - "max_seq_length": 1544, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "c399bd706c749788d260ed5f47c3c5c3190f37d9", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 16194748416, + "model_num_parameters": 8030261248, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 4, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1279.3878571428572, - "min_seq_length": 1259, - "max_seq_length": 1498, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1220.3772209567198, - "min_seq_length": 988, - "max_seq_length": 1654, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1318.5322712418301, + "min_seq_length": 1299, + "max_seq_length": 1382, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1509.5322712418301, + "min_seq_length": 1490, + "max_seq_length": 1573, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1484.7719054242002, + "min_seq_length": 1165, + "max_seq_length": 2134, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1412.3547935619315, + "min_seq_length": 1187, + "max_seq_length": 2340, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1447.8215384615385, + "min_seq_length": 1402, + "max_seq_length": 1544, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1279.3878571428572, + "min_seq_length": 1259, + "max_seq_length": 1498, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1220.3772209567198, + "min_seq_length": 988, + "max_seq_length": 1654, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1676.4195064629848, + "min_seq_length": 1646, + "max_seq_length": 1708, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1537.1537313432837, + "min_seq_length": 1520, + "max_seq_length": 1585, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1676.4195064629848, - "min_seq_length": 1646, - "max_seq_length": 1708, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=DeepMount00/Llama-3-8b-Ita,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1537.1537313432837, - "min_seq_length": 1520, - "max_seq_length": 1585, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=DeepMount00/Llama-3-8b-Ita,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": 
"51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/DeepMount00/Llama-3-8b-Ita/results_2024-05-19T23-04-56.757278.json b/DeepMount00/Llama-3-8b-Ita/results_2024-05-19T23-04-56.757278.json index 9f5bdc9225a8433683de2762165cbaeb1ed25c7b..b4df11c629b2f5888c4da218c086835ac1d47a5e 100644 --- a/DeepMount00/Llama-3-8b-Ita/results_2024-05-19T23-04-56.757278.json +++ b/DeepMount00/Llama-3-8b-Ita/results_2024-05-19T23-04-56.757278.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6877574329504141, - "all_grouped_npm": 0.5347979327978014, + "all_grouped_average": 0.7064529337238148, + "all_grouped_npm": 0.5626186184725045, "all_grouped": { "enem_challenge": 0.7116864940517844, "bluex": 0.588317107093185, @@ -45,7 +45,7 @@ "faquad_nli": 0.7530594601709288, "hatebr_offensive": 0.8684530363041023, "portuguese_hate_speech": 0.6102181878747501, - "tweetsentbr": 0.5047785208818167 + "tweetsentbr": 0.6730380278424222 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.7116864940517844, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7530594601709288, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8684530363041023, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6102181878747501, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5047785208818167 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6730380278424222 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.7116864940517844, @@ -150,9 +150,9 @@ "main_score": 0.6102181878747501 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5047785208818167, + "f1_macro,all": 0.6730380278424222, "acc,all": 0.7223880597014926, - "main_score": 0.5047785208818167 + "main_score": 0.6730380278424222 } }, "config_tasks": { diff --git a/EleutherAI/pythia-14m/raw_2024-04-03T19-47-56.339960/results.json b/EleutherAI/pythia-14m/raw_2024-04-03T19-47-56.339960/results.json index f7b041789916d86309bc1cceb9288f90cdfc6a94..1a09fe469e44a1d4ca0ec7da54042b062d7d9340 100644 --- a/EleutherAI/pythia-14m/raw_2024-04-03T19-47-56.339960/results.json +++ b/EleutherAI/pythia-14m/raw_2024-04-03T19-47-56.339960/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.2210516588115701, - "acc,all": 0.48856209150326796, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.0006847937896062521, - "mse,all": 1.8791258169934641, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.17941585535465926, - "acc,exam_id__USP_2021": 0.19230769230769232, - "acc,exam_id__USP_2018": 0.12962962962962962, - "acc,exam_id__UNICAMP_2023": 0.27906976744186046, - "acc,exam_id__USP_2024": 0.14634146341463414, - "acc,exam_id__USP_2022": 0.1836734693877551, - "acc,exam_id__UNICAMP_2019": 0.14, - "acc,exam_id__USP_2020": 0.17857142857142858, - "acc,exam_id__UNICAMP_2021_1": 0.2608695652173913, - "acc,exam_id__USP_2019": 0.225, - "acc,exam_id__UNICAMP_2022": 0.23076923076923078, - "acc,exam_id__UNICAMP_2024": 0.15555555555555556, - "acc,exam_id__UNICAMP_2021_2": 0.11764705882352941, - "acc,exam_id__UNICAMP_2018": 0.2222222222222222, - "acc,exam_id__USP_2023": 0.09090909090909091, - "acc,exam_id__UNICAMP_2020": 0.16363636363636364, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.19104268719384185, - "acc,exam_id__2013": 0.16666666666666666, - "acc,exam_id__2012": 0.19827586206896552, - "acc,exam_id__2015": 0.14285714285714285, - "acc,exam_id__2016": 0.18181818181818182, - "acc,exam_id__2009": 
0.16521739130434782, - "acc,exam_id__2023": 0.25925925925925924, - "acc,exam_id__2016_2": 0.1951219512195122, - "acc,exam_id__2010": 0.1623931623931624, - "acc,exam_id__2014": 0.1834862385321101, - "acc,exam_id__2022": 0.21804511278195488, - "acc,exam_id__2011": 0.19658119658119658, - "acc,exam_id__2017": 0.20689655172413793 - }, - "faquad_nli": { - "f1_macro,all": 0.4396551724137931, - "acc,all": 0.7846153846153846, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.17328604471858133, - "acc,all": 0.23142857142857143 - }, - "oab_exams": { - "acc,all": 0.21822323462414578, - "acc,exam_id__2011-03": 0.23232323232323232, - "acc,exam_id__2011-04": 0.25, - "acc,exam_id__2011-05": 0.2375, - "acc,exam_id__2016-19": 0.1794871794871795, - "acc,exam_id__2017-23": 0.2, - "acc,exam_id__2018-25": 0.25, - "acc,exam_id__2012-09": 0.23376623376623376, - "acc,exam_id__2017-24": 0.2125, - "acc,exam_id__2014-14": 0.2625, - "acc,exam_id__2015-17": 0.23076923076923078, - "acc,exam_id__2012-07": 0.1, - "acc,exam_id__2016-20": 0.2, - "acc,exam_id__2013-11": 0.1625, - "acc,exam_id__2016-21": 0.2125, - "acc,exam_id__2012-06a": 0.2125, - "acc,exam_id__2015-18": 0.25, - "acc,exam_id__2012-08": 0.2125, - "acc,exam_id__2013-12": 0.175, - "acc,exam_id__2012-06": 0.225, - "acc,exam_id__2015-16": 0.225, - "acc,exam_id__2013-10": 0.2, - "acc,exam_id__2014-13": 0.225, - "acc,exam_id__2010-02": 0.21, - "acc,exam_id__2014-15": 0.20512820512820512, - "acc,exam_id__2016-20a": 0.2875, - "acc,exam_id__2010-01": 0.25882352941176473, - "acc,exam_id__2017-22": 0.2375, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.2692126355492692, - "acc,all": 0.6709753231492362 - }, - "tweetsentbr": { - "f1_macro,all": 0.008390382047306943, - "acc,all": 0.004975124378109453, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.33157748821735517, + "acc,all": 0.48856209150326796, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.0006847937896062521, + "mse,all": 1.8791258169934641, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.17941585535465926, + "acc,exam_id__USP_2021": 0.19230769230769232, + "acc,exam_id__USP_2018": 0.12962962962962962, + "acc,exam_id__UNICAMP_2023": 0.27906976744186046, + "acc,exam_id__USP_2024": 0.14634146341463414, + "acc,exam_id__USP_2022": 0.1836734693877551, + "acc,exam_id__UNICAMP_2019": 0.14, + "acc,exam_id__USP_2020": 0.17857142857142858, + "acc,exam_id__UNICAMP_2021_1": 0.2608695652173913, + "acc,exam_id__USP_2019": 0.225, + "acc,exam_id__UNICAMP_2022": 0.23076923076923078, + "acc,exam_id__UNICAMP_2024": 0.15555555555555556, + "acc,exam_id__UNICAMP_2021_2": 0.11764705882352941, + "acc,exam_id__UNICAMP_2018": 0.2222222222222222, + "acc,exam_id__USP_2023": 0.09090909090909091, + "acc,exam_id__UNICAMP_2020": 0.16363636363636364, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.19104268719384185, + "acc,exam_id__2013": 0.16666666666666666, + "acc,exam_id__2012": 0.19827586206896552, + "acc,exam_id__2015": 0.14285714285714285, + "acc,exam_id__2016": 0.18181818181818182, + "acc,exam_id__2009": 0.16521739130434782, + "acc,exam_id__2023": 0.25925925925925924, + "acc,exam_id__2016_2": 0.1951219512195122, + "acc,exam_id__2010": 0.1623931623931624, + "acc,exam_id__2014": 0.1834862385321101, + "acc,exam_id__2022": 0.21804511278195488, + "acc,exam_id__2011": 0.19658119658119658, + "acc,exam_id__2017": 0.20689655172413793 + }, + "faquad_nli": { + "f1_macro,all": 0.4396551724137931, + "acc,all": 0.7846153846153846, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.259929067077872, + "acc,all": 0.23142857142857143 + }, + "oab_exams": { + "acc,all": 0.21822323462414578, + "acc,exam_id__2011-03": 0.23232323232323232, + "acc,exam_id__2011-04": 0.25, + "acc,exam_id__2011-05": 0.2375, + "acc,exam_id__2016-19": 0.1794871794871795, + "acc,exam_id__2017-23": 0.2, + "acc,exam_id__2018-25": 0.25, + "acc,exam_id__2012-09": 0.23376623376623376, + "acc,exam_id__2017-24": 0.2125, + "acc,exam_id__2014-14": 0.2625, + "acc,exam_id__2015-17": 0.23076923076923078, + "acc,exam_id__2012-07": 0.1, + "acc,exam_id__2016-20": 0.2, + "acc,exam_id__2013-11": 0.1625, + "acc,exam_id__2016-21": 0.2125, + "acc,exam_id__2012-06a": 0.2125, + "acc,exam_id__2015-18": 0.25, + "acc,exam_id__2012-08": 0.2125, + "acc,exam_id__2013-12": 0.175, + "acc,exam_id__2012-06": 0.225, + "acc,exam_id__2015-16": 0.225, + "acc,exam_id__2013-10": 
0.2, + "acc,exam_id__2014-13": 0.225, + "acc,exam_id__2010-02": 0.21, + "acc,exam_id__2014-15": 0.20512820512820512, + "acc,exam_id__2016-20a": 0.2875, + "acc,exam_id__2010-01": 0.25882352941176473, + "acc,exam_id__2017-22": 0.2375, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.4038189533239038, + "acc,all": 0.6709753231492362 + }, + "tweetsentbr": { + "f1_macro,all": 0.011187176063075921, + "acc,all": 0.004975124378109453, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 42, - "non_truncated": 14108, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 56, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "f33025648652797a390d8c54835273845b437161", 
- "model_dtype": "torch.float16", - "model_memory_footprint": 54087788, - "model_num_parameters": 14067712, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2048, - "max_ctx_length": 2016, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1378.0061274509803, - "min_seq_length": 1355, - "max_seq_length": 1444, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1514.0061274509803, - "min_seq_length": 1491, - "max_seq_length": 1580, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 26, - "non_truncated": 693, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 39, - "mean_seq_length": 1656.076495132128, - "min_seq_length": 1285, - "max_seq_length": 2440, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.945757997218359 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 16, - "non_truncated": 1413, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 17, - "mean_seq_length": 1559.0517844646606, - "min_seq_length": 1308, - "max_seq_length": 2520, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.988103568929321 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1578.8153846153846, - "min_seq_length": 1525, - "max_seq_length": 1688, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1292.5114285714285, - "min_seq_length": 1269, - "max_seq_length": 1535, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 42, + "non_truncated": 14108, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 56, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "f33025648652797a390d8c54835273845b437161", + "model_dtype": "torch.float16", + "model_memory_footprint": 54087788, + "model_num_parameters": 14067712, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + 
"batch_size": 32, + "max_length": 2048, + "max_ctx_length": 2016, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1340.5503416856493, - "min_seq_length": 1077, - "max_seq_length": 1805, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1378.0061274509803, + "min_seq_length": 1355, + "max_seq_length": 1444, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1514.0061274509803, + "min_seq_length": 1491, + "max_seq_length": 1580, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 26, + "non_truncated": 693, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 39, + "mean_seq_length": 1656.076495132128, + "min_seq_length": 1285, + "max_seq_length": 2440, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.945757997218359 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 16, + "non_truncated": 1413, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 17, + "mean_seq_length": 1559.0517844646606, + "min_seq_length": 1308, + "max_seq_length": 2520, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.988103568929321 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1578.8153846153846, + "min_seq_length": 1525, + "max_seq_length": 1688, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1292.5114285714285, + "min_seq_length": 1269, + "max_seq_length": 1535, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1340.5503416856493, + "min_seq_length": 1077, + "max_seq_length": 1805, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1775.5558166862515, + "min_seq_length": 1741, + "max_seq_length": 1812, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 
2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1495.2800995024875, + "min_seq_length": 1475, + "max_seq_length": 1554, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1775.5558166862515, - "min_seq_length": 1741, - "max_seq_length": 1812, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=EleutherAI/pythia-14m,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1495.2800995024875, - "min_seq_length": 1475, - "max_seq_length": 1554, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=EleutherAI/pythia-14m,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": null + "git_hash": null } \ No newline at end of file diff --git a/EleutherAI/pythia-14m/results_2024-04-03T19-47-56.339960.json b/EleutherAI/pythia-14m/results_2024-04-03T19-47-56.339960.json index fe202d78c0a1da9b92afa7a34f4da70e6761c574..58b3666e77498e01b294178d9fe92d62d40054fe 100644 --- a/EleutherAI/pythia-14m/results_2024-04-03T19-47-56.339960.json +++ b/EleutherAI/pythia-14m/results_2024-04-03T19-47-56.339960.json @@ -34,29 +34,29 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.1889958293891971, - "all_grouped_npm": -0.24792686543396047, + "all_grouped_average": 0.22617049200647255, + "all_grouped_npm": -0.1749423038952026, "all_grouped": { "enem_challenge": 0.19104268719384185, "bluex": 0.17941585535465926, "oab_exams": 0.21822323462414578, - "assin2_rte": 0.2210516588115701, + "assin2_rte": 0.33157748821735517, "assin2_sts": 0.0006847937896062521, "faquad_nli": 0.4396551724137931, - "hatebr_offensive": 0.17328604471858133, - "portuguese_hate_speech": 0.2692126355492692, - "tweetsentbr": 0.008390382047306943 + "hatebr_offensive": 0.259929067077872, + "portuguese_hate_speech": 0.4038189533239038, + "tweetsentbr": 0.011187176063075921 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.19104268719384185, "harness|bluex|bluex|None|3": 0.17941585535465926, "harness|oab_exams|oab_exams|None|3": 0.21822323462414578, - "harness|assin2_rte|assin2_rte|None|15": 0.2210516588115701, + "harness|assin2_rte|assin2_rte|None|15": 0.33157748821735517, "harness|assin2_sts|assin2_sts|None|15": 0.0006847937896062521, "harness|faquad_nli|faquad_nli|None|15": 0.4396551724137931, - "harness|hatebr_offensive|hatebr_offensive|None|25": 
0.17328604471858133, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.2692126355492692, - "harness|tweetsentbr|tweetsentbr|None|25": 0.008390382047306943 + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.259929067077872, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.4038189533239038, + "harness|tweetsentbr|tweetsentbr|None|25": 0.011187176063075921 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.19104268719384185, @@ -125,9 +125,9 @@ "main_score": 0.21822323462414578 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.2210516588115701, + "f1_macro,all": 0.33157748821735517, "acc,all": 0.48856209150326796, - "main_score": 0.2210516588115701 + "main_score": 0.33157748821735517 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.0006847937896062521, @@ -140,19 +140,19 @@ "main_score": 0.4396551724137931 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.17328604471858133, + "f1_macro,all": 0.259929067077872, "acc,all": 0.23142857142857143, - "main_score": 0.17328604471858133 + "main_score": 0.259929067077872 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.2692126355492692, + "f1_macro,all": 0.4038189533239038, "acc,all": 0.6709753231492362, - "main_score": 0.2692126355492692 + "main_score": 0.4038189533239038 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.008390382047306943, + "f1_macro,all": 0.011187176063075921, "acc,all": 0.004975124378109453, - "main_score": 0.008390382047306943 + "main_score": 0.011187176063075921 } }, "config_tasks": { diff --git a/EleutherAI/pythia-70m-deduped/raw_2024-04-03T21-10-06.848681/results.json b/EleutherAI/pythia-70m-deduped/raw_2024-04-03T21-10-06.848681/results.json index 042b946c8c0520df40c68ddd6d89d9bc35c215cb..3657d0ee6256e8b34e515b5ce4a475b64f1efcb1 100644 --- a/EleutherAI/pythia-70m-deduped/raw_2024-04-03T21-10-06.848681/results.json +++ b/EleutherAI/pythia-70m-deduped/raw_2024-04-03T21-10-06.848681/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.23382263963539596, - "acc,all": 0.5053104575163399, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.02026922309956098, - "mse,all": 2.6797467320261448, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.1835883171070932, - "acc,exam_id__UNICAMP_2022": 0.23076923076923078, - "acc,exam_id__USP_2018": 0.12962962962962962, - "acc,exam_id__UNICAMP_2024": 0.17777777777777778, - "acc,exam_id__USP_2021": 0.17307692307692307, - "acc,exam_id__USP_2019": 0.25, - "acc,exam_id__UNICAMP_2019": 0.14, - "acc,exam_id__UNICAMP_2021_1": 0.30434782608695654, - "acc,exam_id__UNICAMP_2020": 0.16363636363636364, - "acc,exam_id__UNICAMP_2021_2": 0.13725490196078433, - "acc,exam_id__USP_2022": 0.1836734693877551, - "acc,exam_id__UNICAMP_2018": 0.2037037037037037, - "acc,exam_id__UNICAMP_2023": 0.3023255813953488, - "acc,exam_id__USP_2023": 0.06818181818181818, - "acc,exam_id__USP_2024": 0.12195121951219512, - "acc,exam_id__USP_2020": 0.19642857142857142, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.172148355493352, - "acc,exam_id__2012": 0.16379310344827586, - "acc,exam_id__2011": 0.17094017094017094, - "acc,exam_id__2016_2": 0.14634146341463414, - "acc,exam_id__2017": 0.1896551724137931, - "acc,exam_id__2009": 0.17391304347826086, - "acc,exam_id__2015": 0.11764705882352941, - "acc,exam_id__2014": 0.1743119266055046, - "acc,exam_id__2022": 
0.18796992481203006, - "acc,exam_id__2010": 0.15384615384615385, - "acc,exam_id__2016": 0.17355371900826447, - "acc,exam_id__2013": 0.12037037037037036, - "acc,exam_id__2023": 0.2740740740740741 - }, - "faquad_nli": { - "f1_macro,all": 0.2759039805530234, - "acc,all": 0.6984615384615385, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.28076386043861, - "acc,all": 0.48857142857142855 - }, - "oab_exams": { - "acc,all": 0.2041002277904328, - "acc,exam_id__2012-08": 0.2, - "acc,exam_id__2011-04": 0.225, - "acc,exam_id__2016-20": 0.1875, - "acc,exam_id__2012-09": 0.2077922077922078, - "acc,exam_id__2016-20a": 0.25, - "acc,exam_id__2017-24": 0.2, - "acc,exam_id__2013-12": 0.15, - "acc,exam_id__2012-06a": 0.2125, - "acc,exam_id__2017-22": 0.225, - "acc,exam_id__2014-13": 0.2, - "acc,exam_id__2013-11": 0.1375, - "acc,exam_id__2015-18": 0.225, - "acc,exam_id__2011-03": 0.1717171717171717, - "acc,exam_id__2012-06": 0.2375, - "acc,exam_id__2011-05": 0.1875, - "acc,exam_id__2010-02": 0.21, - "acc,exam_id__2014-15": 0.19230769230769232, - "acc,exam_id__2015-16": 0.2, - "acc,exam_id__2014-14": 0.2625, - "acc,exam_id__2016-19": 0.1794871794871795, - "acc,exam_id__2017-23": 0.1875, - "acc,exam_id__2015-17": 0.24358974358974358, - "acc,exam_id__2016-21": 0.2125, - "acc,exam_id__2012-07": 0.1375, - "acc,exam_id__2013-10": 0.1875, - "acc,exam_id__2010-01": 0.2235294117647059, - "acc,exam_id__2018-25": 0.2625, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.24182579976211263, - "acc,all": 0.36662749706227965 - }, - "tweetsentbr": { - "f1_macro,all": 0.13108766233766234, - "acc,all": 0.29502487562189056, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.35073395945309394, + "acc,all": 0.5053104575163399, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.02026922309956098, + "mse,all": 2.6797467320261448, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.1835883171070932, + "acc,exam_id__UNICAMP_2022": 0.23076923076923078, + "acc,exam_id__USP_2018": 0.12962962962962962, + "acc,exam_id__UNICAMP_2024": 0.17777777777777778, + "acc,exam_id__USP_2021": 0.17307692307692307, + "acc,exam_id__USP_2019": 0.25, + "acc,exam_id__UNICAMP_2019": 0.14, + "acc,exam_id__UNICAMP_2021_1": 0.30434782608695654, + "acc,exam_id__UNICAMP_2020": 0.16363636363636364, + "acc,exam_id__UNICAMP_2021_2": 0.13725490196078433, + "acc,exam_id__USP_2022": 0.1836734693877551, + "acc,exam_id__UNICAMP_2018": 0.2037037037037037, + "acc,exam_id__UNICAMP_2023": 0.3023255813953488, + "acc,exam_id__USP_2023": 0.06818181818181818, + "acc,exam_id__USP_2024": 0.12195121951219512, + "acc,exam_id__USP_2020": 0.19642857142857142, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.172148355493352, + "acc,exam_id__2012": 0.16379310344827586, + "acc,exam_id__2011": 0.17094017094017094, + "acc,exam_id__2016_2": 0.14634146341463414, + "acc,exam_id__2017": 0.1896551724137931, + "acc,exam_id__2009": 0.17391304347826086, + "acc,exam_id__2015": 0.11764705882352941, + "acc,exam_id__2014": 0.1743119266055046, + "acc,exam_id__2022": 0.18796992481203006, + "acc,exam_id__2010": 0.15384615384615385, + "acc,exam_id__2016": 0.17355371900826447, + "acc,exam_id__2013": 0.12037037037037036, + "acc,exam_id__2023": 0.2740740740740741 + }, + "faquad_nli": { + "f1_macro,all": 0.4138559708295351, + "acc,all": 0.6984615384615385, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.421145790657915, + "acc,all": 0.48857142857142855 + }, + "oab_exams": { + "acc,all": 0.2041002277904328, + "acc,exam_id__2012-08": 0.2, + "acc,exam_id__2011-04": 0.225, + "acc,exam_id__2016-20": 0.1875, + "acc,exam_id__2012-09": 0.2077922077922078, + "acc,exam_id__2016-20a": 0.25, + "acc,exam_id__2017-24": 0.2, + "acc,exam_id__2013-12": 0.15, + "acc,exam_id__2012-06a": 0.2125, + "acc,exam_id__2017-22": 0.225, + "acc,exam_id__2014-13": 0.2, + "acc,exam_id__2013-11": 0.1375, + "acc,exam_id__2015-18": 0.225, + "acc,exam_id__2011-03": 0.1717171717171717, + "acc,exam_id__2012-06": 0.2375, + "acc,exam_id__2011-05": 0.1875, + "acc,exam_id__2010-02": 0.21, + "acc,exam_id__2014-15": 0.19230769230769232, + "acc,exam_id__2015-16": 0.2, + "acc,exam_id__2014-14": 0.2625, + "acc,exam_id__2016-19": 0.1794871794871795, + "acc,exam_id__2017-23": 0.1875, + 
"acc,exam_id__2015-17": 0.24358974358974358, + "acc,exam_id__2016-21": 0.2125, + "acc,exam_id__2012-07": 0.1375, + "acc,exam_id__2013-10": 0.1875, + "acc,exam_id__2010-01": 0.2235294117647059, + "acc,exam_id__2018-25": 0.2625, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.3627386996431689, + "acc,all": 0.36662749706227965 + }, + "tweetsentbr": { + "f1_macro,all": 0.1747835497835498, + "acc,all": 0.29502487562189056, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 42, - "non_truncated": 14108, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 56, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "e93a9faa9c77e5d09219f6c868bfc7a1bd65593c", 
- "model_dtype": "torch.float16", - "model_memory_footprint": 167592140, - "model_num_parameters": 70426624, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 64, - "max_length": 2048, - "max_ctx_length": 2016, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1378.0061274509803, - "min_seq_length": 1355, - "max_seq_length": 1444, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1514.0061274509803, - "min_seq_length": 1491, - "max_seq_length": 1580, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 26, - "non_truncated": 693, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 39, - "mean_seq_length": 1656.076495132128, - "min_seq_length": 1285, - "max_seq_length": 2440, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.945757997218359 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 16, - "non_truncated": 1413, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 17, - "mean_seq_length": 1559.0517844646606, - "min_seq_length": 1308, - "max_seq_length": 2520, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.988103568929321 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1578.8153846153846, - "min_seq_length": 1525, - "max_seq_length": 1688, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1292.5114285714285, - "min_seq_length": 1269, - "max_seq_length": 1535, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 42, + "non_truncated": 14108, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 56, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "e93a9faa9c77e5d09219f6c868bfc7a1bd65593c", + "model_dtype": "torch.float16", + "model_memory_footprint": 167592140, + "model_num_parameters": 70426624, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + 
"batch_size": 64, + "max_length": 2048, + "max_ctx_length": 2016, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1340.5503416856493, - "min_seq_length": 1077, - "max_seq_length": 1805, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1378.0061274509803, + "min_seq_length": 1355, + "max_seq_length": 1444, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1514.0061274509803, + "min_seq_length": 1491, + "max_seq_length": 1580, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 26, + "non_truncated": 693, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 39, + "mean_seq_length": 1656.076495132128, + "min_seq_length": 1285, + "max_seq_length": 2440, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.945757997218359 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 16, + "non_truncated": 1413, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 17, + "mean_seq_length": 1559.0517844646606, + "min_seq_length": 1308, + "max_seq_length": 2520, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.988103568929321 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1578.8153846153846, + "min_seq_length": 1525, + "max_seq_length": 1688, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1292.5114285714285, + "min_seq_length": 1269, + "max_seq_length": 1535, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1340.5503416856493, + "min_seq_length": 1077, + "max_seq_length": 1805, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1775.5558166862515, + "min_seq_length": 1741, + "max_seq_length": 1812, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 
2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1495.2800995024875, + "min_seq_length": 1475, + "max_seq_length": 1554, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1775.5558166862515, - "min_seq_length": 1741, - "max_seq_length": 1812, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=EleutherAI/pythia-70m-deduped,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1495.2800995024875, - "min_seq_length": 1475, - "max_seq_length": 1554, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=EleutherAI/pythia-70m-deduped,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": null + "git_hash": null } \ No newline at end of file diff --git a/EleutherAI/pythia-70m-deduped/results_2024-04-03T21-10-06.848681.json b/EleutherAI/pythia-70m-deduped/results_2024-04-03T21-10-06.848681.json index 1fbc11419a1be511fa20947f0e165a2426952f68..ef360fe65ec32912c35984b04bff5c2abd409e77 100644 --- a/EleutherAI/pythia-70m-deduped/results_2024-04-03T21-10-06.848681.json +++ b/EleutherAI/pythia-70m-deduped/results_2024-04-03T21-10-06.848681.json @@ -34,29 +34,29 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.19372334069080482, - "all_grouped_npm": -0.24214649119144413, + "all_grouped_average": 0.25592934376196685, + "all_grouped_npm": -0.12378239539395361, "all_grouped": { "enem_challenge": 0.172148355493352, "bluex": 0.1835883171070932, "oab_exams": 0.2041002277904328, - "assin2_rte": 0.23382263963539596, + "assin2_rte": 0.35073395945309394, "assin2_sts": 0.02026922309956098, - "faquad_nli": 0.2759039805530234, - "hatebr_offensive": 0.28076386043861, - "portuguese_hate_speech": 0.24182579976211263, - "tweetsentbr": 0.13108766233766234 + "faquad_nli": 0.4138559708295351, + "hatebr_offensive": 0.421145790657915, + "portuguese_hate_speech": 0.3627386996431689, + "tweetsentbr": 0.1747835497835498 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.172148355493352, "harness|bluex|bluex|None|3": 0.1835883171070932, "harness|oab_exams|oab_exams|None|3": 0.2041002277904328, - "harness|assin2_rte|assin2_rte|None|15": 0.23382263963539596, + "harness|assin2_rte|assin2_rte|None|15": 0.35073395945309394, "harness|assin2_sts|assin2_sts|None|15": 0.02026922309956098, - "harness|faquad_nli|faquad_nli|None|15": 
0.2759039805530234, - "harness|hatebr_offensive|hatebr_offensive|None|25": 0.28076386043861, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.24182579976211263, - "harness|tweetsentbr|tweetsentbr|None|25": 0.13108766233766234 + "harness|faquad_nli|faquad_nli|None|15": 0.4138559708295351, + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.421145790657915, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.3627386996431689, + "harness|tweetsentbr|tweetsentbr|None|25": 0.1747835497835498 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.172148355493352, @@ -125,9 +125,9 @@ "main_score": 0.2041002277904328 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.23382263963539596, + "f1_macro,all": 0.35073395945309394, "acc,all": 0.5053104575163399, - "main_score": 0.23382263963539596 + "main_score": 0.35073395945309394 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.02026922309956098, @@ -135,24 +135,24 @@ "main_score": 0.02026922309956098 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.2759039805530234, + "f1_macro,all": 0.4138559708295351, "acc,all": 0.6984615384615385, - "main_score": 0.2759039805530234 + "main_score": 0.4138559708295351 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.28076386043861, + "f1_macro,all": 0.421145790657915, "acc,all": 0.48857142857142855, - "main_score": 0.28076386043861 + "main_score": 0.421145790657915 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.24182579976211263, + "f1_macro,all": 0.3627386996431689, "acc,all": 0.36662749706227965, - "main_score": 0.24182579976211263 + "main_score": 0.3627386996431689 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.13108766233766234, + "f1_macro,all": 0.1747835497835498, "acc,all": 0.29502487562189056, - "main_score": 0.13108766233766234 + "main_score": 0.1747835497835498 } }, "config_tasks": { diff --git a/EleutherAI/pythia-70m/raw_2024-04-24T21-25-37.361813/results.json b/EleutherAI/pythia-70m/raw_2024-04-24T21-25-37.361813/results.json index 22f538ba0b73b0514a16495ee8d81cde0a9412db..73035f3f0dcffb81ccd3872d311d4f1fd0244c0e 100644 --- a/EleutherAI/pythia-70m/raw_2024-04-24T21-25-37.361813/results.json +++ b/EleutherAI/pythia-70m/raw_2024-04-24T21-25-37.361813/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.4502521949740358, - "acc,all": 0.5171568627450981, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.006173005990956128, - "mse,all": 2.2616870915032687, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.2086230876216968, - "acc,exam_id__UNICAMP_2020": 0.2, - "acc,exam_id__UNICAMP_2023": 0.37209302325581395, - "acc,exam_id__UNICAMP_2021_1": 0.2826086956521739, - "acc,exam_id__UNICAMP_2022": 0.28205128205128205, - "acc,exam_id__USP_2018": 0.07407407407407407, - "acc,exam_id__USP_2019": 0.15, - "acc,exam_id__USP_2020": 0.21428571428571427, - "acc,exam_id__USP_2023": 0.09090909090909091, - "acc,exam_id__USP_2024": 0.12195121951219512, - "acc,exam_id__UNICAMP_2021_2": 0.3137254901960784, - "acc,exam_id__UNICAMP_2018": 0.3148148148148148, - "acc,exam_id__UNICAMP_2019": 0.22, - "acc,exam_id__USP_2021": 0.17307692307692307, - "acc,exam_id__UNICAMP_2024": 0.2222222222222222, - "acc,exam_id__USP_2022": 0.10204081632653061, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.0622813156053184, - "acc,exam_id__2015": 0.058823529411764705, - 
"acc,exam_id__2017": 0.10344827586206896, - "acc,exam_id__2009": 0.034782608695652174, - "acc,exam_id__2010": 0.05982905982905983, - "acc,exam_id__2011": 0.042735042735042736, - "acc,exam_id__2023": 0.05925925925925926, - "acc,exam_id__2014": 0.03669724770642202, - "acc,exam_id__2016_2": 0.04878048780487805, - "acc,exam_id__2022": 0.06766917293233082, - "acc,exam_id__2012": 0.13793103448275862, - "acc,exam_id__2016": 0.06611570247933884, - "acc,exam_id__2013": 0.027777777777777776 - }, - "faquad_nli": { - "f1_macro,all": 0.4396551724137931, - "acc,all": 0.7846153846153846, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.419144092439547, - "acc,all": 0.5042857142857143 - }, - "oab_exams": { - "acc,all": 0.030068337129840545, - "acc,exam_id__2016-21": 0.025, - "acc,exam_id__2013-10": 0.0125, - "acc,exam_id__2017-22": 0.0625, - "acc,exam_id__2017-23": 0.0125, - "acc,exam_id__2010-01": 0.011764705882352941, - "acc,exam_id__2011-04": 0.025, - "acc,exam_id__2012-06a": 0.025, - "acc,exam_id__2013-12": 0.0125, - "acc,exam_id__2016-20a": 0.025, - "acc,exam_id__2014-15": 0.01282051282051282, - "acc,exam_id__2014-14": 0.0125, - "acc,exam_id__2011-03": 0.04040404040404041, - "acc,exam_id__2016-20": 0.025, - "acc,exam_id__2012-09": 0.03896103896103896, - "acc,exam_id__2011-05": 0.0625, - "acc,exam_id__2010-02": 0.03, - "acc,exam_id__2018-25": 0.025, - "acc,exam_id__2012-06": 0.0625, - "acc,exam_id__2017-24": 0.05, - "acc,exam_id__2012-07": 0.025, - "acc,exam_id__2015-17": 0.05128205128205128, - "acc,exam_id__2012-08": 0.025, - "acc,exam_id__2015-18": 0.0375, - "acc,exam_id__2016-19": 0.02564102564102564, - "acc,exam_id__2015-16": 0.025, - "acc,exam_id__2014-13": 0.025, - "acc,exam_id__2013-11": 0.025, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.3087375175771073, - "acc,all": 0.33137485311398357 - }, - "tweetsentbr": { - "f1_macro,all": 0.12087469376644588, - "acc,all": 0.2945273631840796, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.4502521949740358, + "acc,all": 0.5171568627450981, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.006173005990956128, + "mse,all": 2.2616870915032687, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.2086230876216968, + "acc,exam_id__UNICAMP_2020": 0.2, + "acc,exam_id__UNICAMP_2023": 0.37209302325581395, + "acc,exam_id__UNICAMP_2021_1": 0.2826086956521739, + "acc,exam_id__UNICAMP_2022": 0.28205128205128205, + "acc,exam_id__USP_2018": 0.07407407407407407, + "acc,exam_id__USP_2019": 0.15, + "acc,exam_id__USP_2020": 0.21428571428571427, + "acc,exam_id__USP_2023": 0.09090909090909091, + "acc,exam_id__USP_2024": 0.12195121951219512, + "acc,exam_id__UNICAMP_2021_2": 0.3137254901960784, + "acc,exam_id__UNICAMP_2018": 0.3148148148148148, + "acc,exam_id__UNICAMP_2019": 0.22, + "acc,exam_id__USP_2021": 0.17307692307692307, + "acc,exam_id__UNICAMP_2024": 0.2222222222222222, + "acc,exam_id__USP_2022": 0.10204081632653061, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.0622813156053184, + "acc,exam_id__2015": 0.058823529411764705, + "acc,exam_id__2017": 0.10344827586206896, + "acc,exam_id__2009": 0.034782608695652174, + "acc,exam_id__2010": 0.05982905982905983, + "acc,exam_id__2011": 0.042735042735042736, + "acc,exam_id__2023": 0.05925925925925926, + "acc,exam_id__2014": 0.03669724770642202, + "acc,exam_id__2016_2": 0.04878048780487805, + "acc,exam_id__2022": 0.06766917293233082, + "acc,exam_id__2012": 0.13793103448275862, + "acc,exam_id__2016": 0.06611570247933884, + "acc,exam_id__2013": 0.027777777777777776 + }, + "faquad_nli": { + "f1_macro,all": 0.4396551724137931, + "acc,all": 0.7846153846153846, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.419144092439547, + "acc,all": 0.5042857142857143 + }, + "oab_exams": { + "acc,all": 0.030068337129840545, + "acc,exam_id__2016-21": 0.025, + "acc,exam_id__2013-10": 0.0125, + "acc,exam_id__2017-22": 0.0625, + "acc,exam_id__2017-23": 0.0125, + "acc,exam_id__2010-01": 0.011764705882352941, + "acc,exam_id__2011-04": 0.025, + "acc,exam_id__2012-06a": 0.025, + "acc,exam_id__2013-12": 0.0125, + "acc,exam_id__2016-20a": 0.025, + "acc,exam_id__2014-15": 0.01282051282051282, + "acc,exam_id__2014-14": 0.0125, + "acc,exam_id__2011-03": 0.04040404040404041, + "acc,exam_id__2016-20": 0.025, + "acc,exam_id__2012-09": 0.03896103896103896, + "acc,exam_id__2011-05": 0.0625, + "acc,exam_id__2010-02": 0.03, + "acc,exam_id__2018-25": 0.025, + "acc,exam_id__2012-06": 0.0625, + "acc,exam_id__2017-24": 0.05, + "acc,exam_id__2012-07": 0.025, + "acc,exam_id__2015-17": 
0.05128205128205128, + "acc,exam_id__2012-08": 0.025, + "acc,exam_id__2015-18": 0.0375, + "acc,exam_id__2016-19": 0.02564102564102564, + "acc,exam_id__2015-16": 0.025, + "acc,exam_id__2014-13": 0.025, + "acc,exam_id__2013-11": 0.025, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.3087375175771073, + "acc,all": 0.33137485311398357 + }, + "tweetsentbr": { + "f1_macro,all": 0.16116625835526113, + "acc,all": 0.2945273631840796, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 42, - "non_truncated": 14108, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 56, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "a39f36b100fe8a5377810d56c3f4789b9c53ac42", - "model_dtype": "torch.float16", - "model_memory_footprint": 167592140, - "model_num_parameters": 70426624, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2048, - "max_ctx_length": 2016, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1378.0061274509803, - "min_seq_length": 1355, - "max_seq_length": 1444, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1514.0061274509803, - "min_seq_length": 1491, - "max_seq_length": 1580, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 26, - "non_truncated": 693, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 39, - "mean_seq_length": 1656.076495132128, - "min_seq_length": 1285, - "max_seq_length": 2440, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.945757997218359 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 16, - "non_truncated": 1413, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 17, - "mean_seq_length": 
1559.0517844646606, - "min_seq_length": 1308, - "max_seq_length": 2520, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.988103568929321 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1578.8153846153846, - "min_seq_length": 1525, - "max_seq_length": 1688, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 42, + "non_truncated": 14108, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 56, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "a39f36b100fe8a5377810d56c3f4789b9c53ac42", + "model_dtype": "torch.float16", + "model_memory_footprint": 167592140, + "model_num_parameters": 70426624, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2048, + "max_ctx_length": 2016, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1292.5114285714285, - "min_seq_length": 1269, - "max_seq_length": 1535, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1340.5503416856493, - "min_seq_length": 1077, - "max_seq_length": 1805, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1378.0061274509803, + "min_seq_length": 1355, + "max_seq_length": 1444, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1514.0061274509803, + "min_seq_length": 1491, + "max_seq_length": 1580, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 26, + "non_truncated": 693, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 39, + "mean_seq_length": 1656.076495132128, + "min_seq_length": 1285, + "max_seq_length": 2440, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.945757997218359 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 16, + "non_truncated": 1413, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 17, + "mean_seq_length": 1559.0517844646606, + "min_seq_length": 1308, + "max_seq_length": 2520, + "max_ctx_length": 2016, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.988103568929321 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1578.8153846153846, + "min_seq_length": 1525, + "max_seq_length": 1688, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1292.5114285714285, + "min_seq_length": 1269, + "max_seq_length": 1535, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1340.5503416856493, + "min_seq_length": 1077, + "max_seq_length": 1805, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1775.5558166862515, + "min_seq_length": 1741, + "max_seq_length": 1812, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1495.2800995024875, + "min_seq_length": 1475, + "max_seq_length": 1554, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1775.5558166862515, - "min_seq_length": 1741, - "max_seq_length": 1812, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=EleutherAI/pythia-70m,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1495.2800995024875, - "min_seq_length": 1475, - "max_seq_length": 1554, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=EleutherAI/pythia-70m,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": 
"51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/EleutherAI/pythia-70m/results_2024-04-24T21-25-37.361813.json b/EleutherAI/pythia-70m/results_2024-04-24T21-25-37.361813.json index 85e8868de02637b60a3466acc18df80166b447b8..4b76097734cd406c827af3fafacc3f8f88ce4c05 100644 --- a/EleutherAI/pythia-70m/results_2024-04-24T21-25-37.361813.json +++ b/EleutherAI/pythia-70m/results_2024-04-24T21-25-37.361813.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.2273121575020823, - "all_grouped_npm": -0.15629152255175874, + "all_grouped_average": 0.2317889980119507, + "all_grouped_npm": -0.14962955750731177, "all_grouped": { "enem_challenge": 0.0622813156053184, "bluex": 0.2086230876216968, @@ -45,7 +45,7 @@ "faquad_nli": 0.4396551724137931, "hatebr_offensive": 0.419144092439547, "portuguese_hate_speech": 0.3087375175771073, - "tweetsentbr": 0.12087469376644588 + "tweetsentbr": 0.16116625835526113 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.0622813156053184, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.4396551724137931, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.419144092439547, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.3087375175771073, - "harness|tweetsentbr|tweetsentbr|None|25": 0.12087469376644588 + "harness|tweetsentbr|tweetsentbr|None|25": 0.16116625835526113 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.0622813156053184, @@ -150,9 +150,9 @@ "main_score": 0.3087375175771073 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.12087469376644588, + "f1_macro,all": 0.16116625835526113, "acc,all": 0.2945273631840796, - "main_score": 0.12087469376644588 + "main_score": 0.16116625835526113 } }, "config_tasks": { diff --git a/FuseAI/FuseChat-7B-VaRM/raw_2024-03-08T15-26-39.517660/results.json b/FuseAI/FuseChat-7B-VaRM/raw_2024-03-08T15-26-39.517660/results.json index 2344c7874dc1ec7f01bf4efb7d8f3730d86809f4..ae3cd78501d5f6588781ac77f95f89c8f7c4d20e 100644 --- a/FuseAI/FuseChat-7B-VaRM/raw_2024-03-08T15-26-39.517660/results.json +++ b/FuseAI/FuseChat-7B-VaRM/raw_2024-03-08T15-26-39.517660/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9272868051476477, - "acc,all": 0.9272875816993464, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7836651113903375, - "mse,all": 0.415110294117647, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5493741307371349, - "acc,exam_id__UNICAMP_2022": 0.6153846153846154, - "acc,exam_id__UNICAMP_2024": 0.5555555555555556, - "acc,exam_id__USP_2024": 0.6341463414634146, - "acc,exam_id__UNICAMP_2021_2": 0.47058823529411764, - "acc,exam_id__USP_2021": 0.5769230769230769, - "acc,exam_id__UNICAMP_2020": 0.5272727272727272, - "acc,exam_id__USP_2019": 0.45, - "acc,exam_id__USP_2020": 0.5178571428571429, - "acc,exam_id__UNICAMP_2019": 0.54, - "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, - "acc,exam_id__UNICAMP_2023": 0.5581395348837209, - "acc,exam_id__USP_2018": 0.5370370370370371, - "acc,exam_id__USP_2022": 0.6122448979591837, - "acc,exam_id__USP_2023": 0.5909090909090909, - "acc,exam_id__UNICAMP_2018": 0.5925925925925926, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6480055983205039, - "acc,exam_id__2011": 0.7008547008547008, - "acc,exam_id__2009": 0.6521739130434783, - "acc,exam_id__2012": 0.646551724137931, - "acc,exam_id__2016": 0.5950413223140496, - "acc,exam_id__2017": 0.6551724137931034, - 
"acc,exam_id__2022": 0.6090225563909775, - "acc,exam_id__2023": 0.6666666666666666, - "acc,exam_id__2013": 0.6759259259259259, - "acc,exam_id__2015": 0.6302521008403361, - "acc,exam_id__2010": 0.6495726495726496, - "acc,exam_id__2016_2": 0.6422764227642277, - "acc,exam_id__2014": 0.6605504587155964 - }, - "faquad_nli": { - "f1_macro,all": 0.787259111855886, - "acc,all": 0.8461538461538461, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8223021238433512, - "acc,all": 0.8257142857142857 - }, - "oab_exams": { - "acc,all": 0.4182232346241458, - "acc,exam_id__2017-23": 0.4125, - "acc,exam_id__2010-02": 0.42, - "acc,exam_id__2012-06": 0.4375, - "acc,exam_id__2016-20a": 0.4, - "acc,exam_id__2018-25": 0.4, - "acc,exam_id__2011-05": 0.475, - "acc,exam_id__2013-10": 0.3625, - "acc,exam_id__2015-16": 0.4375, - "acc,exam_id__2013-12": 0.4625, - "acc,exam_id__2016-21": 0.3875, - "acc,exam_id__2011-04": 0.325, - "acc,exam_id__2010-01": 0.35294117647058826, - "acc,exam_id__2012-07": 0.4, - "acc,exam_id__2012-08": 0.4125, - "acc,exam_id__2014-13": 0.325, - "acc,exam_id__2013-11": 0.4625, - "acc,exam_id__2015-18": 0.45, - "acc,exam_id__2016-19": 0.47435897435897434, - "acc,exam_id__2014-14": 0.5, - "acc,exam_id__2012-09": 0.4155844155844156, - "acc,exam_id__2012-06a": 0.4125, - "acc,exam_id__2017-22": 0.425, - "acc,exam_id__2014-15": 0.44871794871794873, - "acc,exam_id__2015-17": 0.5512820512820513, - "acc,exam_id__2017-24": 0.4625, - "acc,exam_id__2016-20": 0.375, - "acc,exam_id__2011-03": 0.3333333333333333, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6973371097488426, - "acc,all": 0.7414806110458284 - }, - "tweetsentbr": { - "f1_macro,all": 0.44067858320963216, - "acc,all": 0.6666666666666666, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9272868051476477, + "acc,all": 0.9272875816993464, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7836651113903375, + "mse,all": 0.415110294117647, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5493741307371349, + "acc,exam_id__UNICAMP_2022": 0.6153846153846154, + "acc,exam_id__UNICAMP_2024": 0.5555555555555556, + "acc,exam_id__USP_2024": 0.6341463414634146, + "acc,exam_id__UNICAMP_2021_2": 0.47058823529411764, + "acc,exam_id__USP_2021": 0.5769230769230769, + "acc,exam_id__UNICAMP_2020": 0.5272727272727272, + "acc,exam_id__USP_2019": 0.45, + "acc,exam_id__USP_2020": 0.5178571428571429, + "acc,exam_id__UNICAMP_2019": 0.54, + "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, + "acc,exam_id__UNICAMP_2023": 0.5581395348837209, + "acc,exam_id__USP_2018": 0.5370370370370371, + "acc,exam_id__USP_2022": 0.6122448979591837, + "acc,exam_id__USP_2023": 0.5909090909090909, + "acc,exam_id__UNICAMP_2018": 0.5925925925925926, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6480055983205039, + "acc,exam_id__2011": 0.7008547008547008, + "acc,exam_id__2009": 0.6521739130434783, + "acc,exam_id__2012": 0.646551724137931, + "acc,exam_id__2016": 0.5950413223140496, + "acc,exam_id__2017": 0.6551724137931034, + "acc,exam_id__2022": 0.6090225563909775, + "acc,exam_id__2023": 0.6666666666666666, + "acc,exam_id__2013": 0.6759259259259259, + "acc,exam_id__2015": 0.6302521008403361, + "acc,exam_id__2010": 0.6495726495726496, + "acc,exam_id__2016_2": 0.6422764227642277, + "acc,exam_id__2014": 0.6605504587155964 + }, + "faquad_nli": { + "f1_macro,all": 0.787259111855886, + "acc,all": 0.8461538461538461, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8223021238433512, + "acc,all": 0.8257142857142857 + }, + "oab_exams": { + "acc,all": 0.4182232346241458, + "acc,exam_id__2017-23": 0.4125, + "acc,exam_id__2010-02": 0.42, + "acc,exam_id__2012-06": 0.4375, + "acc,exam_id__2016-20a": 0.4, + "acc,exam_id__2018-25": 0.4, + "acc,exam_id__2011-05": 0.475, + "acc,exam_id__2013-10": 0.3625, + "acc,exam_id__2015-16": 0.4375, + "acc,exam_id__2013-12": 0.4625, + "acc,exam_id__2016-21": 0.3875, + "acc,exam_id__2011-04": 0.325, + "acc,exam_id__2010-01": 0.35294117647058826, + "acc,exam_id__2012-07": 0.4, + "acc,exam_id__2012-08": 0.4125, + "acc,exam_id__2014-13": 0.325, + "acc,exam_id__2013-11": 0.4625, + "acc,exam_id__2015-18": 0.45, + "acc,exam_id__2016-19": 0.47435897435897434, + "acc,exam_id__2014-14": 0.5, + "acc,exam_id__2012-09": 0.4155844155844156, + "acc,exam_id__2012-06a": 0.4125, + "acc,exam_id__2017-22": 0.425, + 
"acc,exam_id__2014-15": 0.44871794871794873, + "acc,exam_id__2015-17": 0.5512820512820513, + "acc,exam_id__2017-24": 0.4625, + "acc,exam_id__2016-20": 0.375, + "acc,exam_id__2011-03": 0.3333333333333333, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6973371097488426, + "acc,all": 0.7414806110458284 + }, + "tweetsentbr": { + "f1_macro,all": 0.5875714442795096, + "acc,all": 0.6666666666666666, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"cda1eebe5ba3912c900045ed7847600e29b22c64", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 14617722880, - "model_num_parameters": 7241748480, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:1", - "batch_size": 16, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1584.7455065359477, - "min_seq_length": 1561, - "max_seq_length": 1651, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1824.7455065359477, - "min_seq_length": 1801, - "max_seq_length": 1891, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1782.9262865090404, - "min_seq_length": 1406, - "max_seq_length": 2583, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - "mean_seq_length": 1683.039188243527, - "min_seq_length": 1417, - "max_seq_length": 2681, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1825.9876923076922, - "min_seq_length": 1770, - "max_seq_length": 1946, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1676.3878571428572, - "min_seq_length": 1653, - "max_seq_length": 1927, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "cda1eebe5ba3912c900045ed7847600e29b22c64", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 14617722880, + "model_num_parameters": 7241748480, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": 
null, + "model_is_quantized": null, + "model_device": "cuda:1", + "batch_size": 16, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1428.764464692483, - "min_seq_length": 1162, - "max_seq_length": 1931, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1584.7455065359477, + "min_seq_length": 1561, + "max_seq_length": 1651, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1824.7455065359477, + "min_seq_length": 1801, + "max_seq_length": 1891, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1782.9262865090404, + "min_seq_length": 1406, + "max_seq_length": 2583, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1683.039188243527, + "min_seq_length": 1417, + "max_seq_length": 2681, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1825.9876923076922, + "min_seq_length": 1770, + "max_seq_length": 1946, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1676.3878571428572, + "min_seq_length": 1653, + "max_seq_length": 1927, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1428.764464692483, + "min_seq_length": 1162, + "max_seq_length": 1931, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2177.3360752056406, + "min_seq_length": 2142, + "max_seq_length": 2216, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": 
{ + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1923.2492537313433, + "min_seq_length": 1902, + "max_seq_length": 2018, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2177.3360752056406, - "min_seq_length": 2142, - "max_seq_length": 2216, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=FuseAI/FuseChat-7B-VaRM,dtype=bfloat16,device=cuda:1,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1923.2492537313433, - "min_seq_length": 1902, - "max_seq_length": 2018, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=FuseAI/FuseChat-7B-VaRM,dtype=bfloat16,device=cuda:1,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": null + "git_hash": null } \ No newline at end of file diff --git a/FuseAI/FuseChat-7B-VaRM/results_2024-03-08T15-26-39.517660.json b/FuseAI/FuseChat-7B-VaRM/results_2024-03-08T15-26-39.517660.json index 7ac73c2ffb021b6e1e65e560233418f89a12755a..7847b49d7f842c57a45f7ef9ea2b9927971d0e87 100644 --- a/FuseAI/FuseChat-7B-VaRM/results_2024-03-08T15-26-39.517660.json +++ b/FuseAI/FuseChat-7B-VaRM/results_2024-03-08T15-26-39.517660.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6749035343197202, - "all_grouped_npm": 0.5201529644708365, + "all_grouped_average": 0.6912249633274844, + "all_grouped_npm": 0.5444408052561999, "all_grouped": { "enem_challenge": 0.6480055983205039, "bluex": 0.5493741307371349, @@ -45,7 +45,7 @@ "faquad_nli": 0.787259111855886, "hatebr_offensive": 0.8223021238433512, "portuguese_hate_speech": 0.6973371097488426, - "tweetsentbr": 0.44067858320963216 + "tweetsentbr": 0.5875714442795096 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6480055983205039, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.787259111855886, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8223021238433512, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6973371097488426, - "harness|tweetsentbr|tweetsentbr|None|25": 0.44067858320963216 + "harness|tweetsentbr|tweetsentbr|None|25": 0.5875714442795096 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6480055983205039, @@ -150,9 +150,9 @@ "main_score": 0.6973371097488426 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 
0.44067858320963216, + "f1_macro,all": 0.5875714442795096, "acc,all": 0.6666666666666666, - "main_score": 0.44067858320963216 + "main_score": 0.5875714442795096 } }, "config_tasks": { diff --git a/GritLM/GritLM-7B-KTO/raw_2024-06-15T00-03-38.180499/results.json b/GritLM/GritLM-7B-KTO/raw_2024-06-15T00-03-38.180499/results.json index 47450c3aff507229d2b793bd00aa2cf00ea74774..04f30e2b8640c7513ad86046ebccbf8e6e156478 100644 --- a/GritLM/GritLM-7B-KTO/raw_2024-06-15T00-03-38.180499/results.json +++ b/GritLM/GritLM-7B-KTO/raw_2024-06-15T00-03-38.180499/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9203424859265217, - "acc,all": 0.9203431372549019, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7712035054624826, - "mse,all": 0.5456045751633987, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5104311543810849, - "acc,exam_id__UNICAMP_2023": 0.4883720930232558, - "acc,exam_id__USP_2019": 0.4, - "acc,exam_id__USP_2018": 0.5, - "acc,exam_id__USP_2020": 0.5714285714285714, - "acc,exam_id__UNICAMP_2019": 0.5, - "acc,exam_id__UNICAMP_2022": 0.5641025641025641, - "acc,exam_id__UNICAMP_2018": 0.4074074074074074, - "acc,exam_id__UNICAMP_2020": 0.509090909090909, - "acc,exam_id__USP_2021": 0.5, - "acc,exam_id__USP_2023": 0.6818181818181818, - "acc,exam_id__UNICAMP_2024": 0.5111111111111111, - "acc,exam_id__UNICAMP_2021_1": 0.5, - "acc,exam_id__USP_2022": 0.5714285714285714, - "acc,exam_id__UNICAMP_2021_2": 0.35294117647058826, - "acc,exam_id__USP_2024": 0.6341463414634146, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.5808257522743177, - "acc,exam_id__2017": 0.603448275862069, - "acc,exam_id__2014": 0.5871559633027523, - "acc,exam_id__2010": 0.5982905982905983, - "acc,exam_id__2011": 0.6239316239316239, - "acc,exam_id__2015": 0.5966386554621849, - "acc,exam_id__2013": 0.5462962962962963, - "acc,exam_id__2023": 0.5925925925925926, - "acc,exam_id__2022": 0.6165413533834586, - "acc,exam_id__2016_2": 0.5691056910569106, - "acc,exam_id__2012": 0.5517241379310345, - "acc,exam_id__2016": 0.5537190082644629, - "acc,exam_id__2009": 0.5217391304347826 - }, - "faquad_nli": { - "f1_macro,all": 0.7270998493492381, - "acc,all": 0.7646153846153846, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7778660429262838, - "acc,all": 0.7842857142857143 - }, - "oab_exams": { - "acc,all": 0.4031890660592255, - "acc,exam_id__2015-17": 0.5, - "acc,exam_id__2015-18": 0.425, - "acc,exam_id__2014-14": 0.4875, - "acc,exam_id__2012-09": 0.37662337662337664, - "acc,exam_id__2016-20a": 0.3375, - "acc,exam_id__2012-06": 0.425, - "acc,exam_id__2017-23": 0.375, - "acc,exam_id__2010-01": 0.4, - "acc,exam_id__2011-04": 0.3125, - "acc,exam_id__2011-05": 0.4375, - "acc,exam_id__2014-13": 0.3125, - "acc,exam_id__2016-20": 0.35, - "acc,exam_id__2013-10": 0.275, - "acc,exam_id__2016-19": 0.46153846153846156, - "acc,exam_id__2016-21": 0.3375, - "acc,exam_id__2012-06a": 0.4, - "acc,exam_id__2012-07": 0.4375, - "acc,exam_id__2015-16": 0.4, - "acc,exam_id__2013-11": 0.4125, - "acc,exam_id__2018-25": 0.475, - "acc,exam_id__2017-24": 0.3875, - "acc,exam_id__2010-02": 0.44, - "acc,exam_id__2012-08": 0.4125, - "acc,exam_id__2014-15": 0.47435897435897434, - "acc,exam_id__2011-03": 0.3434343434343434, - "acc,exam_id__2013-12": 0.4875, - "acc,exam_id__2017-22": 0.4125, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 
0.71169854421558, - "acc,all": 0.7532314923619271 - }, - "tweetsentbr": { - "f1_macro,all": 0.4509015535405497, - "acc,all": 0.673134328358209, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9203424859265217, + "acc,all": 0.9203431372549019, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7712035054624826, + "mse,all": 0.5456045751633987, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5104311543810849, + "acc,exam_id__UNICAMP_2023": 0.4883720930232558, + "acc,exam_id__USP_2019": 0.4, + "acc,exam_id__USP_2018": 0.5, + "acc,exam_id__USP_2020": 0.5714285714285714, + "acc,exam_id__UNICAMP_2019": 0.5, + "acc,exam_id__UNICAMP_2022": 0.5641025641025641, + "acc,exam_id__UNICAMP_2018": 0.4074074074074074, + "acc,exam_id__UNICAMP_2020": 0.509090909090909, + "acc,exam_id__USP_2021": 0.5, + "acc,exam_id__USP_2023": 0.6818181818181818, + "acc,exam_id__UNICAMP_2024": 0.5111111111111111, + "acc,exam_id__UNICAMP_2021_1": 0.5, + "acc,exam_id__USP_2022": 0.5714285714285714, + "acc,exam_id__UNICAMP_2021_2": 0.35294117647058826, + "acc,exam_id__USP_2024": 0.6341463414634146, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.5808257522743177, + "acc,exam_id__2017": 0.603448275862069, + "acc,exam_id__2014": 0.5871559633027523, + "acc,exam_id__2010": 0.5982905982905983, + "acc,exam_id__2011": 0.6239316239316239, + "acc,exam_id__2015": 0.5966386554621849, + "acc,exam_id__2013": 0.5462962962962963, + "acc,exam_id__2023": 0.5925925925925926, + "acc,exam_id__2022": 0.6165413533834586, + "acc,exam_id__2016_2": 0.5691056910569106, + "acc,exam_id__2012": 0.5517241379310345, + "acc,exam_id__2016": 0.5537190082644629, + "acc,exam_id__2009": 0.5217391304347826 + }, + "faquad_nli": { + "f1_macro,all": 0.7270998493492381, + "acc,all": 0.7646153846153846, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7778660429262838, + "acc,all": 0.7842857142857143 + }, + "oab_exams": { + "acc,all": 0.4031890660592255, + "acc,exam_id__2015-17": 0.5, + "acc,exam_id__2015-18": 0.425, + "acc,exam_id__2014-14": 0.4875, + "acc,exam_id__2012-09": 0.37662337662337664, + "acc,exam_id__2016-20a": 0.3375, + 
"acc,exam_id__2012-06": 0.425, + "acc,exam_id__2017-23": 0.375, + "acc,exam_id__2010-01": 0.4, + "acc,exam_id__2011-04": 0.3125, + "acc,exam_id__2011-05": 0.4375, + "acc,exam_id__2014-13": 0.3125, + "acc,exam_id__2016-20": 0.35, + "acc,exam_id__2013-10": 0.275, + "acc,exam_id__2016-19": 0.46153846153846156, + "acc,exam_id__2016-21": 0.3375, + "acc,exam_id__2012-06a": 0.4, + "acc,exam_id__2012-07": 0.4375, + "acc,exam_id__2015-16": 0.4, + "acc,exam_id__2013-11": 0.4125, + "acc,exam_id__2018-25": 0.475, + "acc,exam_id__2017-24": 0.3875, + "acc,exam_id__2010-02": 0.44, + "acc,exam_id__2012-08": 0.4125, + "acc,exam_id__2014-15": 0.47435897435897434, + "acc,exam_id__2011-03": 0.3434343434343434, + "acc,exam_id__2013-12": 0.4875, + "acc,exam_id__2017-22": 0.4125, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.71169854421558, + "acc,all": 0.7532314923619271 + }, + "tweetsentbr": { + "f1_macro,all": 0.6012020713873997, + "acc,all": 0.673134328358209, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "b5c48669508c1de18c698460c187f64e90e7df44", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1554.7455065359477, - "min_seq_length": 1531, - "max_seq_length": 1621, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1754.7455065359477, - "min_seq_length": 1731, - "max_seq_length": 1821, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1705.9262865090404, - "min_seq_length": 1329, - "max_seq_length": 2506, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 
1606.039188243527, - "min_seq_length": 1340, - "max_seq_length": 2604, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1765.9876923076922, - "min_seq_length": 1710, - "max_seq_length": 1886, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "b5c48669508c1de18c698460c187f64e90e7df44", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1633.3878571428572, - "min_seq_length": 1610, - "max_seq_length": 1884, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1350.764464692483, - "min_seq_length": 1084, - "max_seq_length": 1853, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1554.7455065359477, + "min_seq_length": 1531, + "max_seq_length": 1621, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1754.7455065359477, + "min_seq_length": 1731, + "max_seq_length": 1821, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1705.9262865090404, + "min_seq_length": 1329, + "max_seq_length": 2506, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1606.039188243527, + "min_seq_length": 1340, + "max_seq_length": 2604, + "max_ctx_length": 2528, 
+ "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1765.9876923076922, + "min_seq_length": 1710, + "max_seq_length": 1886, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1633.3878571428572, + "min_seq_length": 1610, + "max_seq_length": 1884, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1350.764464692483, + "min_seq_length": 1084, + "max_seq_length": 1853, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2131.3360752056406, + "min_seq_length": 2096, + "max_seq_length": 2170, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1877.2492537313433, + "min_seq_length": 1856, + "max_seq_length": 1972, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2131.3360752056406, - "min_seq_length": 2096, - "max_seq_length": 2170, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=GritLM/GritLM-7B-KTO,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1877.2492537313433, - "min_seq_length": 1856, - "max_seq_length": 1972, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=GritLM/GritLM-7B-KTO,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - 
"git_hash": "2d67fba" + "git_hash": "2d67fba" } \ No newline at end of file diff --git a/GritLM/GritLM-7B-KTO/results_2024-06-15T00-03-38.180499.json b/GritLM/GritLM-7B-KTO/results_2024-06-15T00-03-38.180499.json index 8df95b8be4db7de00437c336bb7607349dbac35b..52eed3c5833a7d498ab970f9087b7b94a59b387c 100644 --- a/GritLM/GritLM-7B-KTO/results_2024-06-15T00-03-38.180499.json +++ b/GritLM/GritLM-7B-KTO/results_2024-06-15T00-03-38.180499.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6503953282372537, - "all_grouped_npm": 0.48267510153705995, + "all_grouped_average": 0.6670953857757926, + "all_grouped_npm": 0.5075263776360761, "all_grouped": { "enem_challenge": 0.5808257522743177, "bluex": 0.5104311543810849, @@ -45,7 +45,7 @@ "faquad_nli": 0.7270998493492381, "hatebr_offensive": 0.7778660429262838, "portuguese_hate_speech": 0.71169854421558, - "tweetsentbr": 0.4509015535405497 + "tweetsentbr": 0.6012020713873997 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.5808257522743177, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7270998493492381, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7778660429262838, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.71169854421558, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4509015535405497 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6012020713873997 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.5808257522743177, @@ -150,9 +150,9 @@ "main_score": 0.71169854421558 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4509015535405497, + "f1_macro,all": 0.6012020713873997, "acc,all": 0.673134328358209, - "main_score": 0.4509015535405497 + "main_score": 0.6012020713873997 } }, "config_tasks": { diff --git a/GritLM/GritLM-7B/raw_2024-06-12T20-31-09.833902/results.json b/GritLM/GritLM-7B/raw_2024-06-12T20-31-09.833902/results.json index 0296c69f50170963a31cc34b7b26aa90576f9fbb..0b9abc908fd9db8677a74b0e792631865af4320f 100644 --- a/GritLM/GritLM-7B/raw_2024-06-12T20-31-09.833902/results.json +++ b/GritLM/GritLM-7B/raw_2024-06-12T20-31-09.833902/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9128900575777901, - "acc,all": 0.9129901960784313, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7210345169515866, - "mse,all": 0.6111764705882352, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.49235048678720444, - "acc,exam_id__USP_2023": 0.6590909090909091, - "acc,exam_id__USP_2021": 0.46153846153846156, - "acc,exam_id__USP_2024": 0.6341463414634146, - "acc,exam_id__USP_2020": 0.48214285714285715, - "acc,exam_id__UNICAMP_2024": 0.5333333333333333, - "acc,exam_id__USP_2022": 0.3469387755102041, - "acc,exam_id__UNICAMP_2019": 0.46, - "acc,exam_id__UNICAMP_2023": 0.5581395348837209, - "acc,exam_id__USP_2018": 0.48148148148148145, - "acc,exam_id__UNICAMP_2021_2": 0.39215686274509803, - "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, - "acc,exam_id__UNICAMP_2018": 0.42592592592592593, - "acc,exam_id__UNICAMP_2020": 0.4909090909090909, - "acc,exam_id__USP_2019": 0.5, - "acc,exam_id__UNICAMP_2022": 0.5641025641025641, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6116165150454863, - "acc,exam_id__2014": 0.5596330275229358, - "acc,exam_id__2009": 0.6347826086956522, - "acc,exam_id__2017": 0.6293103448275862, - "acc,exam_id__2016": 0.5619834710743802, - "acc,exam_id__2012": 0.5603448275862069, - "acc,exam_id__2013": 0.6203703703703703, - 
"acc,exam_id__2015": 0.5798319327731093, - "acc,exam_id__2023": 0.6444444444444445, - "acc,exam_id__2022": 0.6541353383458647, - "acc,exam_id__2010": 0.5811965811965812, - "acc,exam_id__2016_2": 0.6178861788617886, - "acc,exam_id__2011": 0.6837606837606838 - }, - "faquad_nli": { - "f1_macro,all": 0.8085014118262378, - "acc,all": 0.8630769230769231, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8066603535353536, - "acc,all": 0.81 - }, - "oab_exams": { - "acc,all": 0.40501138952164006, - "acc,exam_id__2013-11": 0.375, - "acc,exam_id__2011-05": 0.3875, - "acc,exam_id__2016-20a": 0.325, - "acc,exam_id__2018-25": 0.4, - "acc,exam_id__2014-14": 0.4875, - "acc,exam_id__2014-13": 0.375, - "acc,exam_id__2017-23": 0.3875, - "acc,exam_id__2010-02": 0.51, - "acc,exam_id__2015-17": 0.5384615384615384, - "acc,exam_id__2012-06a": 0.4125, - "acc,exam_id__2012-09": 0.37662337662337664, - "acc,exam_id__2016-20": 0.4, - "acc,exam_id__2012-07": 0.3875, - "acc,exam_id__2012-08": 0.4125, - "acc,exam_id__2013-10": 0.3125, - "acc,exam_id__2015-16": 0.3875, - "acc,exam_id__2017-22": 0.3875, - "acc,exam_id__2012-06": 0.4375, - "acc,exam_id__2011-03": 0.35353535353535354, - "acc,exam_id__2017-24": 0.425, - "acc,exam_id__2014-15": 0.44871794871794873, - "acc,exam_id__2015-18": 0.375, - "acc,exam_id__2016-19": 0.48717948717948717, - "acc,exam_id__2013-12": 0.4625, - "acc,exam_id__2011-04": 0.325, - "acc,exam_id__2016-21": 0.3875, - "acc,exam_id__2010-01": 0.36470588235294116, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.714677578568343, - "acc,all": 0.7473560517038778 - }, - "tweetsentbr": { - "f1_macro,all": 0.4531216849342852, - "acc,all": 0.6686567164179105, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9128900575777901, + "acc,all": 0.9129901960784313, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7210345169515866, + "mse,all": 0.6111764705882352, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.49235048678720444, + "acc,exam_id__USP_2023": 0.6590909090909091, + "acc,exam_id__USP_2021": 0.46153846153846156, + "acc,exam_id__USP_2024": 0.6341463414634146, + "acc,exam_id__USP_2020": 0.48214285714285715, + "acc,exam_id__UNICAMP_2024": 0.5333333333333333, + "acc,exam_id__USP_2022": 0.3469387755102041, + "acc,exam_id__UNICAMP_2019": 0.46, + "acc,exam_id__UNICAMP_2023": 0.5581395348837209, + "acc,exam_id__USP_2018": 0.48148148148148145, + "acc,exam_id__UNICAMP_2021_2": 0.39215686274509803, + "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, + "acc,exam_id__UNICAMP_2018": 0.42592592592592593, + "acc,exam_id__UNICAMP_2020": 0.4909090909090909, + "acc,exam_id__USP_2019": 0.5, + "acc,exam_id__UNICAMP_2022": 0.5641025641025641, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6116165150454863, + "acc,exam_id__2014": 0.5596330275229358, + "acc,exam_id__2009": 0.6347826086956522, + "acc,exam_id__2017": 0.6293103448275862, + "acc,exam_id__2016": 0.5619834710743802, + "acc,exam_id__2012": 0.5603448275862069, + "acc,exam_id__2013": 0.6203703703703703, + "acc,exam_id__2015": 0.5798319327731093, + "acc,exam_id__2023": 0.6444444444444445, + "acc,exam_id__2022": 0.6541353383458647, + "acc,exam_id__2010": 0.5811965811965812, + "acc,exam_id__2016_2": 0.6178861788617886, + "acc,exam_id__2011": 0.6837606837606838 + }, + "faquad_nli": { + "f1_macro,all": 0.8085014118262378, + "acc,all": 0.8630769230769231, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8066603535353536, + "acc,all": 0.81 + }, + "oab_exams": { + "acc,all": 0.40501138952164006, + "acc,exam_id__2013-11": 0.375, + "acc,exam_id__2011-05": 0.3875, + "acc,exam_id__2016-20a": 0.325, + "acc,exam_id__2018-25": 0.4, + "acc,exam_id__2014-14": 0.4875, + "acc,exam_id__2014-13": 0.375, + "acc,exam_id__2017-23": 0.3875, + "acc,exam_id__2010-02": 0.51, + "acc,exam_id__2015-17": 0.5384615384615384, + "acc,exam_id__2012-06a": 0.4125, + "acc,exam_id__2012-09": 0.37662337662337664, + "acc,exam_id__2016-20": 0.4, + "acc,exam_id__2012-07": 0.3875, + "acc,exam_id__2012-08": 0.4125, + "acc,exam_id__2013-10": 0.3125, + "acc,exam_id__2015-16": 0.3875, + "acc,exam_id__2017-22": 0.3875, + "acc,exam_id__2012-06": 0.4375, + "acc,exam_id__2011-03": 0.35353535353535354, + "acc,exam_id__2017-24": 0.425, + "acc,exam_id__2014-15": 0.44871794871794873, + 
"acc,exam_id__2015-18": 0.375, + "acc,exam_id__2016-19": 0.48717948717948717, + "acc,exam_id__2013-12": 0.4625, + "acc,exam_id__2011-04": 0.325, + "acc,exam_id__2016-21": 0.3875, + "acc,exam_id__2010-01": 0.36470588235294116, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.714677578568343, + "acc,all": 0.7473560517038778 + }, + "tweetsentbr": { + "f1_macro,all": 0.6041622465790469, + "acc,all": 0.6686567164179105, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "13f00a0e36500c80ce12870ea513846a066004af", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1554.7455065359477, - "min_seq_length": 1531, - "max_seq_length": 1621, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1754.7455065359477, - "min_seq_length": 1731, - "max_seq_length": 1821, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1705.9262865090404, - "min_seq_length": 1329, - "max_seq_length": 2506, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 
1606.039188243527, - "min_seq_length": 1340, - "max_seq_length": 2604, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1765.9876923076922, - "min_seq_length": 1710, - "max_seq_length": 1886, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "13f00a0e36500c80ce12870ea513846a066004af", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1633.3878571428572, - "min_seq_length": 1610, - "max_seq_length": 1884, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1350.764464692483, - "min_seq_length": 1084, - "max_seq_length": 1853, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1554.7455065359477, + "min_seq_length": 1531, + "max_seq_length": 1621, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1754.7455065359477, + "min_seq_length": 1731, + "max_seq_length": 1821, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1705.9262865090404, + "min_seq_length": 1329, + "max_seq_length": 2506, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1606.039188243527, + "min_seq_length": 1340, + "max_seq_length": 2604, + "max_ctx_length": 2528, 
+ "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1765.9876923076922, + "min_seq_length": 1710, + "max_seq_length": 1886, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1633.3878571428572, + "min_seq_length": 1610, + "max_seq_length": 1884, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1350.764464692483, + "min_seq_length": 1084, + "max_seq_length": 1853, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2131.3360752056406, + "min_seq_length": 2096, + "max_seq_length": 2170, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1877.2492537313433, + "min_seq_length": 1856, + "max_seq_length": 1972, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2131.3360752056406, - "min_seq_length": 2096, - "max_seq_length": 2170, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=GritLM/GritLM-7B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1877.2492537313433, - "min_seq_length": 1856, - "max_seq_length": 1972, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=GritLM/GritLM-7B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": 
"f2a0116" + "git_hash": "f2a0116" } \ No newline at end of file diff --git a/GritLM/GritLM-7B/results_2024-06-12T20-31-09.833902.json b/GritLM/GritLM-7B/results_2024-06-12T20-31-09.833902.json index 550872ac9a08024b0eb1e9a50b22c3a0ec19d0c5..2c29aa4caa59f7836423afec0e29f4202469d905 100644 --- a/GritLM/GritLM-7B/results_2024-06-12T20-31-09.833902.json +++ b/GritLM/GritLM-7B/results_2024-06-12T20-31-09.833902.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6584293327497696, - "all_grouped_npm": 0.5014262136194367, + "all_grouped_average": 0.6752116173769653, + "all_grouped_npm": 0.5263998514575255, "all_grouped": { "enem_challenge": 0.6116165150454863, "bluex": 0.49235048678720444, @@ -45,7 +45,7 @@ "faquad_nli": 0.8085014118262378, "hatebr_offensive": 0.8066603535353536, "portuguese_hate_speech": 0.714677578568343, - "tweetsentbr": 0.4531216849342852 + "tweetsentbr": 0.6041622465790469 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6116165150454863, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.8085014118262378, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8066603535353536, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.714677578568343, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4531216849342852 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6041622465790469 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6116165150454863, @@ -150,9 +150,9 @@ "main_score": 0.714677578568343 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4531216849342852, + "f1_macro,all": 0.6041622465790469, "acc,all": 0.6686567164179105, - "main_score": 0.4531216849342852 + "main_score": 0.6041622465790469 } }, "config_tasks": { diff --git a/HuggingFaceH4/zephyr-7b-beta/raw_2024-02-21T23-57-52.146406/results.json b/HuggingFaceH4/zephyr-7b-beta/raw_2024-02-21T23-57-52.146406/results.json index 157bee150aba77da073a35b3491085aa56812ff2..1e868470ce5a0a01ef127bf880b0de36158803cd 100644 --- a/HuggingFaceH4/zephyr-7b-beta/raw_2024-02-21T23-57-52.146406/results.json +++ b/HuggingFaceH4/zephyr-7b-beta/raw_2024-02-21T23-57-52.146406/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.8836486323653452, - "acc,all": 0.8839869281045751, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.6678266192299295, - "mse,all": 0.6669526143790849, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.47983310152990266, - "acc,exam_id__USP_2021": 0.36538461538461536, - "acc,exam_id__UNICAMP_2021_2": 0.37254901960784315, - "acc,exam_id__UNICAMP_2023": 0.4418604651162791, - "acc,exam_id__UNICAMP_2021_1": 0.45652173913043476, - "acc,exam_id__USP_2024": 0.7073170731707317, - "acc,exam_id__UNICAMP_2018": 0.42592592592592593, - "acc,exam_id__USP_2022": 0.5510204081632653, - "acc,exam_id__UNICAMP_2020": 0.4909090909090909, - "acc,exam_id__USP_2018": 0.5, - "acc,exam_id__USP_2020": 0.42857142857142855, - "acc,exam_id__UNICAMP_2022": 0.46153846153846156, - "acc,exam_id__USP_2019": 0.425, - "acc,exam_id__USP_2023": 0.5909090909090909, - "acc,exam_id__UNICAMP_2019": 0.56, - "acc,exam_id__UNICAMP_2024": 0.4666666666666667, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.5787263820853744, - "acc,exam_id__2016_2": 0.5284552845528455, - "acc,exam_id__2009": 0.591304347826087, - "acc,exam_id__2011": 0.6666666666666666, - "acc,exam_id__2012": 0.6206896551724138, - "acc,exam_id__2013": 0.5925925925925926, - "acc,exam_id__2016": 
0.5537190082644629, - "acc,exam_id__2022": 0.5037593984962406, - "acc,exam_id__2023": 0.5777777777777777, - "acc,exam_id__2010": 0.5555555555555556, - "acc,exam_id__2014": 0.5963302752293578, - "acc,exam_id__2015": 0.6218487394957983, - "acc,exam_id__2017": 0.5517241379310345 - }, - "faquad_nli": { - "f1_macro,all": 0.7017672651113582, - "acc,all": 0.7384615384615385, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8176778106453834, - "acc,all": 0.82 - }, - "oab_exams": { - "acc,all": 0.3931662870159453, - "acc,exam_id__2010-02": 0.46, - "acc,exam_id__2016-19": 0.5, - "acc,exam_id__2015-17": 0.4358974358974359, - "acc,exam_id__2016-21": 0.4, - "acc,exam_id__2017-24": 0.325, - "acc,exam_id__2012-09": 0.37662337662337664, - "acc,exam_id__2011-04": 0.3125, - "acc,exam_id__2017-23": 0.4375, - "acc,exam_id__2011-03": 0.41414141414141414, - "acc,exam_id__2012-07": 0.3375, - "acc,exam_id__2012-06": 0.375, - "acc,exam_id__2014-13": 0.35, - "acc,exam_id__2016-20a": 0.225, - "acc,exam_id__2011-05": 0.3875, - "acc,exam_id__2015-18": 0.425, - "acc,exam_id__2014-15": 0.5384615384615384, - "acc,exam_id__2018-25": 0.4125, - "acc,exam_id__2017-22": 0.425, - "acc,exam_id__2013-11": 0.425, - "acc,exam_id__2014-14": 0.3625, - "acc,exam_id__2013-10": 0.3375, - "acc,exam_id__2010-01": 0.32941176470588235, - "acc,exam_id__2013-12": 0.475, - "acc,exam_id__2015-16": 0.3875, - "acc,exam_id__2012-06a": 0.3875, - "acc,exam_id__2016-20": 0.3875, - "acc,exam_id__2012-08": 0.375, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6658626171810755, - "acc,all": 0.6886016451233843 - }, - "tweetsentbr": { - "f1_macro,all": 0.46064331884597925, - "acc,all": 0.6681592039800995, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.8836486323653452, + "acc,all": 0.8839869281045751, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.6678266192299295, + "mse,all": 0.6669526143790849, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.47983310152990266, + "acc,exam_id__USP_2021": 0.36538461538461536, + "acc,exam_id__UNICAMP_2021_2": 0.37254901960784315, + "acc,exam_id__UNICAMP_2023": 0.4418604651162791, + "acc,exam_id__UNICAMP_2021_1": 0.45652173913043476, + "acc,exam_id__USP_2024": 0.7073170731707317, + "acc,exam_id__UNICAMP_2018": 0.42592592592592593, + "acc,exam_id__USP_2022": 0.5510204081632653, + "acc,exam_id__UNICAMP_2020": 0.4909090909090909, + "acc,exam_id__USP_2018": 0.5, + "acc,exam_id__USP_2020": 0.42857142857142855, + "acc,exam_id__UNICAMP_2022": 0.46153846153846156, + "acc,exam_id__USP_2019": 0.425, + "acc,exam_id__USP_2023": 0.5909090909090909, + "acc,exam_id__UNICAMP_2019": 0.56, + "acc,exam_id__UNICAMP_2024": 0.4666666666666667, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.5787263820853744, + "acc,exam_id__2016_2": 0.5284552845528455, + "acc,exam_id__2009": 0.591304347826087, + "acc,exam_id__2011": 0.6666666666666666, + "acc,exam_id__2012": 0.6206896551724138, + "acc,exam_id__2013": 0.5925925925925926, + "acc,exam_id__2016": 0.5537190082644629, + "acc,exam_id__2022": 0.5037593984962406, + "acc,exam_id__2023": 0.5777777777777777, + "acc,exam_id__2010": 0.5555555555555556, + "acc,exam_id__2014": 0.5963302752293578, + "acc,exam_id__2015": 0.6218487394957983, + "acc,exam_id__2017": 0.5517241379310345 + }, + "faquad_nli": { + "f1_macro,all": 0.7017672651113582, + "acc,all": 0.7384615384615385, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8176778106453834, + "acc,all": 0.82 + }, + "oab_exams": { + "acc,all": 0.3931662870159453, + "acc,exam_id__2010-02": 0.46, + "acc,exam_id__2016-19": 0.5, + "acc,exam_id__2015-17": 0.4358974358974359, + "acc,exam_id__2016-21": 0.4, + "acc,exam_id__2017-24": 0.325, + "acc,exam_id__2012-09": 0.37662337662337664, + "acc,exam_id__2011-04": 0.3125, + "acc,exam_id__2017-23": 0.4375, + "acc,exam_id__2011-03": 0.41414141414141414, + "acc,exam_id__2012-07": 0.3375, + "acc,exam_id__2012-06": 0.375, + "acc,exam_id__2014-13": 0.35, + "acc,exam_id__2016-20a": 0.225, + "acc,exam_id__2011-05": 0.3875, + "acc,exam_id__2015-18": 0.425, + "acc,exam_id__2014-15": 0.5384615384615384, + "acc,exam_id__2018-25": 0.4125, + "acc,exam_id__2017-22": 0.425, + "acc,exam_id__2013-11": 0.425, + "acc,exam_id__2014-14": 0.3625, + "acc,exam_id__2013-10": 0.3375, + "acc,exam_id__2010-01": 
0.32941176470588235, + "acc,exam_id__2013-12": 0.475, + "acc,exam_id__2015-16": 0.3875, + "acc,exam_id__2012-06a": 0.3875, + "acc,exam_id__2016-20": 0.3875, + "acc,exam_id__2012-08": 0.375, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6658626171810755, + "acc,all": 0.6886016451233843 + }, + "tweetsentbr": { + "f1_macro,all": 0.614191091794639, + "acc,all": 0.6681592039800995, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"dc24cabd13eacd3ae3a5fe574bd645483a335a4a", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 4096, - "max_ctx_length": 4064, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1646.7455065359477, - "min_seq_length": 1623, - "max_seq_length": 1713, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1855.7455065359477, - "min_seq_length": 1832, - "max_seq_length": 1922, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1792.9262865090404, - "min_seq_length": 1416, - "max_seq_length": 2593, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1693.039188243527, - "min_seq_length": 1427, - "max_seq_length": 2691, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1871.9876923076922, - "min_seq_length": 1816, - "max_seq_length": 1992, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1752.3878571428572, - "min_seq_length": 1729, - "max_seq_length": 2003, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "dc24cabd13eacd3ae3a5fe574bd645483a335a4a", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": 
null, + "model_device": "cuda:0", + "batch_size": 16, + "max_length": 4096, + "max_ctx_length": 4064, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1438.764464692483, - "min_seq_length": 1172, - "max_seq_length": 1941, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1646.7455065359477, + "min_seq_length": 1623, + "max_seq_length": 1713, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1855.7455065359477, + "min_seq_length": 1832, + "max_seq_length": 1922, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1792.9262865090404, + "min_seq_length": 1416, + "max_seq_length": 2593, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1693.039188243527, + "min_seq_length": 1427, + "max_seq_length": 2691, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1871.9876923076922, + "min_seq_length": 1816, + "max_seq_length": 1992, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1752.3878571428572, + "min_seq_length": 1729, + "max_seq_length": 2003, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1438.764464692483, + "min_seq_length": 1172, + "max_seq_length": 1941, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2253.3360752056406, + "min_seq_length": 2218, + "max_seq_length": 2292, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + 
"non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1999.2492537313433, + "min_seq_length": 1978, + "max_seq_length": 2094, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2253.3360752056406, - "min_seq_length": 2218, - "max_seq_length": 2292, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=HuggingFaceH4/zephyr-7b-beta,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1999.2492537313433, - "min_seq_length": 1978, - "max_seq_length": 2094, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=HuggingFaceH4/zephyr-7b-beta,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "804df15" + "git_hash": "804df15" } \ No newline at end of file diff --git a/HuggingFaceH4/zephyr-7b-beta/results_2024-02-21T23-57-52.146406.json b/HuggingFaceH4/zephyr-7b-beta/results_2024-02-21T23-57-52.146406.json index e687edd0dec8eb383d66c360c5323ba93c3b329b..b2d512e7d869ac98a8ec4736fa3aa3bee1335858 100644 --- a/HuggingFaceH4/zephyr-7b-beta/results_2024-02-21T23-57-52.146406.json +++ b/HuggingFaceH4/zephyr-7b-beta/results_2024-02-21T23-57-52.146406.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6276835593344771, - "all_grouped_npm": 0.45237979734164724, + "all_grouped_average": 0.6447444229954393, + "all_grouped_npm": 0.47776798731331716, "all_grouped": { "enem_challenge": 0.5787263820853744, "bluex": 0.47983310152990266, @@ -45,7 +45,7 @@ "faquad_nli": 0.7017672651113582, "hatebr_offensive": 0.8176778106453834, "portuguese_hate_speech": 0.6658626171810755, - "tweetsentbr": 0.46064331884597925 + "tweetsentbr": 0.614191091794639 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.5787263820853744, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7017672651113582, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8176778106453834, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6658626171810755, - "harness|tweetsentbr|tweetsentbr|None|25": 0.46064331884597925 + "harness|tweetsentbr|tweetsentbr|None|25": 0.614191091794639 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.5787263820853744, @@ -150,9 +150,9 @@ "main_score": 0.6658626171810755 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 
0.46064331884597925, + "f1_macro,all": 0.614191091794639, "acc,all": 0.6681592039800995, - "main_score": 0.46064331884597925 + "main_score": 0.614191091794639 } }, "config_tasks": { diff --git a/HuggingFaceTB/SmolLM-1.7B-Instruct/raw_2024-07-29T01-29-23.830440/results.json b/HuggingFaceTB/SmolLM-1.7B-Instruct/raw_2024-07-29T01-29-23.830440/results.json index 1b1b06364b34b773cb4c9bed0610a28e2bf0de62..f179b3a34b80fb84793848ecab7449d789f1e916 100644 --- a/HuggingFaceTB/SmolLM-1.7B-Instruct/raw_2024-07-29T01-29-23.830440/results.json +++ b/HuggingFaceTB/SmolLM-1.7B-Instruct/raw_2024-07-29T01-29-23.830440/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.39689976031382207, - "acc,all": 0.5755718954248366, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.011089425612064035, - "mse,all": 2.9864903741798092, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.18497913769123783, - "acc,exam_id__USP_2018": 0.1111111111111111, - "acc,exam_id__USP_2024": 0.1951219512195122, - "acc,exam_id__UNICAMP_2019": 0.14, - "acc,exam_id__UNICAMP_2023": 0.32558139534883723, - "acc,exam_id__USP_2021": 0.11538461538461539, - "acc,exam_id__UNICAMP_2021_1": 0.21739130434782608, - "acc,exam_id__UNICAMP_2021_2": 0.13725490196078433, - "acc,exam_id__USP_2020": 0.19642857142857142, - "acc,exam_id__USP_2022": 0.12244897959183673, - "acc,exam_id__UNICAMP_2020": 0.23636363636363636, - "acc,exam_id__UNICAMP_2024": 0.3111111111111111, - "acc,exam_id__USP_2023": 0.1590909090909091, - "acc,exam_id__UNICAMP_2018": 0.2222222222222222, - "acc,exam_id__USP_2019": 0.15, - "acc,exam_id__UNICAMP_2022": 0.15384615384615385, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.15185444366689993, - "acc,exam_id__2017": 0.15517241379310345, - "acc,exam_id__2009": 0.09565217391304348, - "acc,exam_id__2013": 0.19444444444444445, - "acc,exam_id__2023": 0.2, - "acc,exam_id__2011": 0.13675213675213677, - "acc,exam_id__2010": 0.17094017094017094, - "acc,exam_id__2016_2": 0.13008130081300814, - "acc,exam_id__2014": 0.14678899082568808, - "acc,exam_id__2022": 0.15789473684210525, - "acc,exam_id__2015": 0.11764705882352941, - "acc,exam_id__2012": 0.16379310344827586, - "acc,exam_id__2016": 0.1487603305785124 - }, - "faquad_nli": { - "f1_macro,all": 0.17821782178217824, - "acc,all": 0.3323076923076923, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.24931756852830744, - "acc,all": 0.3842857142857143 - }, - "oab_exams": { - "acc,all": 0.2214123006833713, - "acc,exam_id__2011-05": 0.2125, - "acc,exam_id__2014-15": 0.21794871794871795, - "acc,exam_id__2016-21": 0.2125, - "acc,exam_id__2014-13": 0.1875, - "acc,exam_id__2015-18": 0.2, - "acc,exam_id__2016-20": 0.2375, - "acc,exam_id__2017-22": 0.1875, - "acc,exam_id__2015-17": 0.21794871794871795, - "acc,exam_id__2017-24": 0.2375, - "acc,exam_id__2011-03": 0.20202020202020202, - "acc,exam_id__2013-12": 0.175, - "acc,exam_id__2012-08": 0.1625, - "acc,exam_id__2016-19": 0.2564102564102564, - "acc,exam_id__2018-25": 0.25, - "acc,exam_id__2014-14": 0.2, - "acc,exam_id__2016-20a": 0.2875, - "acc,exam_id__2012-07": 0.25, - "acc,exam_id__2017-23": 0.2625, - "acc,exam_id__2013-10": 0.225, - "acc,exam_id__2010-01": 0.16470588235294117, - "acc,exam_id__2013-11": 0.225, - "acc,exam_id__2015-16": 0.2125, - "acc,exam_id__2011-04": 0.275, - "acc,exam_id__2012-09": 0.24675324675324675, - "acc,exam_id__2012-06a": 0.2625, - "acc,exam_id__2012-06": 0.2375, - 
"acc,exam_id__2010-02": 0.19, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.07734989648033126, - "acc,all": 0.0846063454759107 - }, - "tweetsentbr": { - "f1_macro,all": 0.11774990858411193, - "acc,all": 0.2880597014925373, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.5953496404707331, + "acc,all": 0.5755718954248366, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.011089425612064035, + "mse,all": 2.9864903741798092, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.18497913769123783, + "acc,exam_id__USP_2018": 0.1111111111111111, + "acc,exam_id__USP_2024": 0.1951219512195122, + "acc,exam_id__UNICAMP_2019": 0.14, + "acc,exam_id__UNICAMP_2023": 0.32558139534883723, + "acc,exam_id__USP_2021": 0.11538461538461539, + "acc,exam_id__UNICAMP_2021_1": 0.21739130434782608, + "acc,exam_id__UNICAMP_2021_2": 0.13725490196078433, + "acc,exam_id__USP_2020": 0.19642857142857142, + "acc,exam_id__USP_2022": 0.12244897959183673, + "acc,exam_id__UNICAMP_2020": 0.23636363636363636, + "acc,exam_id__UNICAMP_2024": 0.3111111111111111, + "acc,exam_id__USP_2023": 0.1590909090909091, + "acc,exam_id__UNICAMP_2018": 0.2222222222222222, + "acc,exam_id__USP_2019": 0.15, + "acc,exam_id__UNICAMP_2022": 0.15384615384615385, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.15185444366689993, + "acc,exam_id__2017": 0.15517241379310345, + "acc,exam_id__2009": 0.09565217391304348, + "acc,exam_id__2013": 0.19444444444444445, + "acc,exam_id__2023": 0.2, + "acc,exam_id__2011": 0.13675213675213677, + "acc,exam_id__2010": 0.17094017094017094, + "acc,exam_id__2016_2": 0.13008130081300814, + "acc,exam_id__2014": 0.14678899082568808, + "acc,exam_id__2022": 0.15789473684210525, + "acc,exam_id__2015": 0.11764705882352941, + "acc,exam_id__2012": 0.16379310344827586, + "acc,exam_id__2016": 0.1487603305785124 + }, + "faquad_nli": { + "f1_macro,all": 0.26732673267326734, + "acc,all": 0.3323076923076923, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.3739763527924611, + "acc,all": 0.3842857142857143 + }, + "oab_exams": { + 
"acc,all": 0.2214123006833713, + "acc,exam_id__2011-05": 0.2125, + "acc,exam_id__2014-15": 0.21794871794871795, + "acc,exam_id__2016-21": 0.2125, + "acc,exam_id__2014-13": 0.1875, + "acc,exam_id__2015-18": 0.2, + "acc,exam_id__2016-20": 0.2375, + "acc,exam_id__2017-22": 0.1875, + "acc,exam_id__2015-17": 0.21794871794871795, + "acc,exam_id__2017-24": 0.2375, + "acc,exam_id__2011-03": 0.20202020202020202, + "acc,exam_id__2013-12": 0.175, + "acc,exam_id__2012-08": 0.1625, + "acc,exam_id__2016-19": 0.2564102564102564, + "acc,exam_id__2018-25": 0.25, + "acc,exam_id__2014-14": 0.2, + "acc,exam_id__2016-20a": 0.2875, + "acc,exam_id__2012-07": 0.25, + "acc,exam_id__2017-23": 0.2625, + "acc,exam_id__2013-10": 0.225, + "acc,exam_id__2010-01": 0.16470588235294117, + "acc,exam_id__2013-11": 0.225, + "acc,exam_id__2015-16": 0.2125, + "acc,exam_id__2011-04": 0.275, + "acc,exam_id__2012-09": 0.24675324675324675, + "acc,exam_id__2012-06a": 0.2625, + "acc,exam_id__2012-06": 0.2375, + "acc,exam_id__2010-02": 0.19, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.1160248447204969, + "acc,all": 0.0846063454759107 + }, + "tweetsentbr": { + "f1_macro,all": 0.15699987811214924, + "acc,all": 0.2880597014925373, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1220, - "non_truncated": 12930, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 2128, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "df32c35db9f85f9c44997dcf694d1a79e944ff69", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 3422755968, - "model_num_parameters": 1711376384, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2048, - "max_ctx_length": 2016, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1618.1425653594772, - "min_seq_length": 1593, - "max_seq_length": 1692, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1844.1425653594772, - "min_seq_length": 1819, - "max_seq_length": 1918, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 205, - "non_truncated": 514, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 267, - "mean_seq_length": 1882.730180806676, - "min_seq_length": 1479, - "max_seq_length": 2749, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.6286509040333796 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 162, - "non_truncated": 1267, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 
170, - "mean_seq_length": 1803.6703988803358, - "min_seq_length": 1508, - "max_seq_length": 2796, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.881035689293212 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1875.5692307692307, - "min_seq_length": 1815, - "max_seq_length": 2009, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1220, + "non_truncated": 12930, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 2128, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "df32c35db9f85f9c44997dcf694d1a79e944ff69", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 3422755968, + "model_num_parameters": 1711376384, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2048, + "max_ctx_length": 2016, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1568.8021428571428, - "min_seq_length": 1543, - "max_seq_length": 1844, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 2, - "non_truncated": 2193, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 2, - "mean_seq_length": 1567.8610478359908, - "min_seq_length": 1267, - "max_seq_length": 2116, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9990888382687926 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1618.1425653594772, + "min_seq_length": 1593, + "max_seq_length": 1692, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1844.1425653594772, + "min_seq_length": 1819, + "max_seq_length": 1918, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 205, + "non_truncated": 514, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 267, + "mean_seq_length": 1882.730180806676, + "min_seq_length": 1479, + "max_seq_length": 2749, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.6286509040333796 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 162, + "non_truncated": 1267, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 170, + "mean_seq_length": 1803.6703988803358, + 
"min_seq_length": 1508, + "max_seq_length": 2796, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.881035689293212 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1875.5692307692307, + "min_seq_length": 1815, + "max_seq_length": 2009, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1568.8021428571428, + "min_seq_length": 1543, + "max_seq_length": 1844, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 2, + "non_truncated": 2193, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 2, + "mean_seq_length": 1567.8610478359908, + "min_seq_length": 1267, + "max_seq_length": 2116, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9990888382687926 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 851, + "non_truncated": 0, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 1689, + "mean_seq_length": 2138.0317273795536, + "min_seq_length": 2101, + "max_seq_length": 2172, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 23.015276145710928 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1809.4268656716417, + "min_seq_length": 1788, + "max_seq_length": 1870, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 851, - "non_truncated": 0, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 1689, - "mean_seq_length": 2138.0317273795536, - "min_seq_length": 2101, - "max_seq_length": 2172, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 23.015276145710928 + "config": { + "model": "huggingface", + "model_args": "pretrained=HuggingFaceTB/SmolLM-1.7B-Instruct,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1809.4268656716417, - "min_seq_length": 1788, - "max_seq_length": 1870, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=HuggingFaceTB/SmolLM-1.7B-Instruct,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": 
null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/HuggingFaceTB/SmolLM-1.7B-Instruct/results_2024-07-29T01-29-23.830440.json b/HuggingFaceTB/SmolLM-1.7B-Instruct/results_2024-07-29T01-29-23.830440.json index 36ac9cf36a157cba1959f3a061cbde45c079eff8..cb3943dd1c18b6779ad3dd062f9c01d7c32b8c78 100644 --- a/HuggingFaceTB/SmolLM-1.7B-Instruct/results_2024-07-29T01-29-23.830440.json +++ b/HuggingFaceTB/SmolLM-1.7B-Instruct/results_2024-07-29T01-29-23.830440.json @@ -34,29 +34,29 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.17654114037136934, - "all_grouped_npm": -0.2712041427416204, + "all_grouped_average": 0.23100141738029784, + "all_grouped_npm": -0.166464107969747, "all_grouped": { "enem_challenge": 0.15185444366689993, "bluex": 0.18497913769123783, "oab_exams": 0.2214123006833713, - "assin2_rte": 0.39689976031382207, + "assin2_rte": 0.5953496404707331, "assin2_sts": 0.011089425612064035, - "faquad_nli": 0.17821782178217824, - "hatebr_offensive": 0.24931756852830744, - "portuguese_hate_speech": 0.07734989648033126, - "tweetsentbr": 0.11774990858411193 + "faquad_nli": 0.26732673267326734, + "hatebr_offensive": 0.3739763527924611, + "portuguese_hate_speech": 0.1160248447204969, + "tweetsentbr": 0.15699987811214924 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.15185444366689993, "harness|bluex|bluex|None|3": 0.18497913769123783, "harness|oab_exams|oab_exams|None|3": 0.2214123006833713, - "harness|assin2_rte|assin2_rte|None|15": 0.39689976031382207, + "harness|assin2_rte|assin2_rte|None|15": 0.5953496404707331, "harness|assin2_sts|assin2_sts|None|15": 0.011089425612064035, - "harness|faquad_nli|faquad_nli|None|15": 0.17821782178217824, - "harness|hatebr_offensive|hatebr_offensive|None|25": 0.24931756852830744, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.07734989648033126, - "harness|tweetsentbr|tweetsentbr|None|25": 0.11774990858411193 + "harness|faquad_nli|faquad_nli|None|15": 0.26732673267326734, + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.3739763527924611, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.1160248447204969, + "harness|tweetsentbr|tweetsentbr|None|25": 0.15699987811214924 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.15185444366689993, @@ -125,9 +125,9 @@ "main_score": 0.2214123006833713 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.39689976031382207, + "f1_macro,all": 0.5953496404707331, "acc,all": 0.5755718954248366, - "main_score": 0.39689976031382207 + "main_score": 0.5953496404707331 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.011089425612064035, @@ -135,24 +135,24 @@ "main_score": 0.011089425612064035 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.17821782178217824, + "f1_macro,all": 0.26732673267326734, "acc,all": 0.3323076923076923, - "main_score": 0.17821782178217824 + "main_score": 0.26732673267326734 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.24931756852830744, + "f1_macro,all": 0.3739763527924611, "acc,all": 0.3842857142857143, - "main_score": 0.24931756852830744 + "main_score": 0.3739763527924611 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.07734989648033126, + "f1_macro,all": 0.1160248447204969, "acc,all": 0.0846063454759107, - 
"main_score": 0.07734989648033126 + "main_score": 0.1160248447204969 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.11774990858411193, + "f1_macro,all": 0.15699987811214924, "acc,all": 0.2880597014925373, - "main_score": 0.11774990858411193 + "main_score": 0.15699987811214924 } }, "config_tasks": { diff --git a/HuggingFaceTB/SmolLM-135M-Instruct/raw_2024-07-24T14-22-33.098781/results.json b/HuggingFaceTB/SmolLM-135M-Instruct/raw_2024-07-24T14-22-33.098781/results.json index 99f11d3e4d53a37827a005978cee1d57d1477d24..945c2d9c75fecf5a9219f2286ceaff630b06ff9b 100644 --- a/HuggingFaceTB/SmolLM-135M-Instruct/raw_2024-07-24T14-22-33.098781/results.json +++ b/HuggingFaceTB/SmolLM-135M-Instruct/raw_2024-07-24T14-22-33.098781/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.26300477210440537, - "acc,all": 0.2839052287581699, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.03362436337787243, - "mse,all": 4.79703431372549, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.1627260083449235, - "acc,exam_id__USP_2018": 0.1111111111111111, - "acc,exam_id__USP_2021": 0.15384615384615385, - "acc,exam_id__UNICAMP_2018": 0.18518518518518517, - "acc,exam_id__UNICAMP_2020": 0.16363636363636364, - "acc,exam_id__UNICAMP_2022": 0.20512820512820512, - "acc,exam_id__UNICAMP_2024": 0.15555555555555556, - "acc,exam_id__USP_2019": 0.225, - "acc,exam_id__USP_2022": 0.1836734693877551, - "acc,exam_id__USP_2023": 0.06818181818181818, - "acc,exam_id__UNICAMP_2021_2": 0.0784313725490196, - "acc,exam_id__UNICAMP_2023": 0.3023255813953488, - "acc,exam_id__USP_2024": 0.07317073170731707, - "acc,exam_id__USP_2020": 0.14285714285714285, - "acc,exam_id__UNICAMP_2021_1": 0.2826086956521739, - "acc,exam_id__UNICAMP_2019": 0.14, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.1728481455563331, - "acc,exam_id__2014": 0.1651376146788991, - "acc,exam_id__2015": 0.13445378151260504, - "acc,exam_id__2013": 0.1574074074074074, - "acc,exam_id__2009": 0.1565217391304348, - "acc,exam_id__2022": 0.17293233082706766, - "acc,exam_id__2017": 0.15517241379310345, - "acc,exam_id__2010": 0.15384615384615385, - "acc,exam_id__2012": 0.16379310344827586, - "acc,exam_id__2016": 0.18181818181818182, - "acc,exam_id__2011": 0.18803418803418803, - "acc,exam_id__2016_2": 0.17073170731707318, - "acc,exam_id__2023": 0.25925925925925924 - }, - "faquad_nli": { - "f1_macro,all": 0.07058169545364304, - "acc,all": 0.08461538461538462, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.06650348469472238, - "acc,all": 0.05857142857142857 - }, - "oab_exams": { - "acc,all": 0.22369020501138953, - "acc,exam_id__2016-21": 0.2, - "acc,exam_id__2018-25": 0.275, - "acc,exam_id__2017-22": 0.25, - "acc,exam_id__2012-08": 0.225, - "acc,exam_id__2017-23": 0.2, - "acc,exam_id__2013-12": 0.15, - "acc,exam_id__2011-05": 0.2375, - "acc,exam_id__2016-20": 0.2, - "acc,exam_id__2014-15": 0.20512820512820512, - "acc,exam_id__2012-06a": 0.2375, - "acc,exam_id__2016-20a": 0.2875, - "acc,exam_id__2014-13": 0.2375, - "acc,exam_id__2013-10": 0.2125, - "acc,exam_id__2012-09": 0.22077922077922077, - "acc,exam_id__2015-16": 0.2375, - "acc,exam_id__2011-03": 0.24242424242424243, - "acc,exam_id__2013-11": 0.15, - "acc,exam_id__2016-19": 0.1794871794871795, - "acc,exam_id__2014-14": 0.25, - "acc,exam_id__2010-01": 0.25882352941176473, - "acc,exam_id__2012-07": 0.1375, - "acc,exam_id__2017-24": 0.225, - 
"acc,exam_id__2015-18": 0.25, - "acc,exam_id__2011-04": 0.25, - "acc,exam_id__2012-06": 0.2375, - "acc,exam_id__2010-02": 0.23, - "acc,exam_id__2015-17": 0.24358974358974358, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.07431443242652681, - "acc,all": 0.07638072855464159 - }, - "tweetsentbr": { - "f1_macro,all": 0.10296765823081612, - "acc,all": 0.14378109452736318, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.3945071581566081, + "acc,all": 0.2839052287581699, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.03362436337787243, + "mse,all": 4.79703431372549, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.1627260083449235, + "acc,exam_id__USP_2018": 0.1111111111111111, + "acc,exam_id__USP_2021": 0.15384615384615385, + "acc,exam_id__UNICAMP_2018": 0.18518518518518517, + "acc,exam_id__UNICAMP_2020": 0.16363636363636364, + "acc,exam_id__UNICAMP_2022": 0.20512820512820512, + "acc,exam_id__UNICAMP_2024": 0.15555555555555556, + "acc,exam_id__USP_2019": 0.225, + "acc,exam_id__USP_2022": 0.1836734693877551, + "acc,exam_id__USP_2023": 0.06818181818181818, + "acc,exam_id__UNICAMP_2021_2": 0.0784313725490196, + "acc,exam_id__UNICAMP_2023": 0.3023255813953488, + "acc,exam_id__USP_2024": 0.07317073170731707, + "acc,exam_id__USP_2020": 0.14285714285714285, + "acc,exam_id__UNICAMP_2021_1": 0.2826086956521739, + "acc,exam_id__UNICAMP_2019": 0.14, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.1728481455563331, + "acc,exam_id__2014": 0.1651376146788991, + "acc,exam_id__2015": 0.13445378151260504, + "acc,exam_id__2013": 0.1574074074074074, + "acc,exam_id__2009": 0.1565217391304348, + "acc,exam_id__2022": 0.17293233082706766, + "acc,exam_id__2017": 0.15517241379310345, + "acc,exam_id__2010": 0.15384615384615385, + "acc,exam_id__2012": 0.16379310344827586, + "acc,exam_id__2016": 0.18181818181818182, + "acc,exam_id__2011": 0.18803418803418803, + "acc,exam_id__2016_2": 0.17073170731707318, + "acc,exam_id__2023": 0.25925925925925924 + }, + "faquad_nli": { + "f1_macro,all": 0.10587254318046457, + "acc,all": 0.08461538461538462, + "alias": "faquad_nli" + }, + 
"hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.09975522704208357, + "acc,all": 0.05857142857142857 + }, + "oab_exams": { + "acc,all": 0.22369020501138953, + "acc,exam_id__2016-21": 0.2, + "acc,exam_id__2018-25": 0.275, + "acc,exam_id__2017-22": 0.25, + "acc,exam_id__2012-08": 0.225, + "acc,exam_id__2017-23": 0.2, + "acc,exam_id__2013-12": 0.15, + "acc,exam_id__2011-05": 0.2375, + "acc,exam_id__2016-20": 0.2, + "acc,exam_id__2014-15": 0.20512820512820512, + "acc,exam_id__2012-06a": 0.2375, + "acc,exam_id__2016-20a": 0.2875, + "acc,exam_id__2014-13": 0.2375, + "acc,exam_id__2013-10": 0.2125, + "acc,exam_id__2012-09": 0.22077922077922077, + "acc,exam_id__2015-16": 0.2375, + "acc,exam_id__2011-03": 0.24242424242424243, + "acc,exam_id__2013-11": 0.15, + "acc,exam_id__2016-19": 0.1794871794871795, + "acc,exam_id__2014-14": 0.25, + "acc,exam_id__2010-01": 0.25882352941176473, + "acc,exam_id__2012-07": 0.1375, + "acc,exam_id__2017-24": 0.225, + "acc,exam_id__2015-18": 0.25, + "acc,exam_id__2011-04": 0.25, + "acc,exam_id__2012-06": 0.2375, + "acc,exam_id__2010-02": 0.23, + "acc,exam_id__2015-17": 0.24358974358974358, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.11147164863979023, + "acc,all": 0.07638072855464159 + }, + "tweetsentbr": { + "f1_macro,all": 0.1372902109744215, + "acc,all": 0.14378109452736318, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1220, - "non_truncated": 12930, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 2128, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "0a0a7c2a1b1dc8f75f1d5a6ac86d38e3e7bab014", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 269033984, - "model_num_parameters": 134515008, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 1, - "max_length": 2048, - "max_ctx_length": 2016, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1618.1425653594772, - "min_seq_length": 1593, - "max_seq_length": 1692, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1844.1425653594772, - "min_seq_length": 1819, - "max_seq_length": 1918, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 205, - "non_truncated": 514, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 267, - "mean_seq_length": 1882.730180806676, - "min_seq_length": 1479, - "max_seq_length": 2749, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.6286509040333796 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 162, - "non_truncated": 1267, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 
170, - "mean_seq_length": 1803.6703988803358, - "min_seq_length": 1508, - "max_seq_length": 2796, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.881035689293212 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1875.5692307692307, - "min_seq_length": 1815, - "max_seq_length": 2009, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1220, + "non_truncated": 12930, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 2128, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "0a0a7c2a1b1dc8f75f1d5a6ac86d38e3e7bab014", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 269033984, + "model_num_parameters": 134515008, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 1, + "max_length": 2048, + "max_ctx_length": 2016, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1568.8021428571428, - "min_seq_length": 1543, - "max_seq_length": 1844, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 2, - "non_truncated": 2193, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 2, - "mean_seq_length": 1567.8610478359908, - "min_seq_length": 1267, - "max_seq_length": 2116, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9990888382687926 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1618.1425653594772, + "min_seq_length": 1593, + "max_seq_length": 1692, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1844.1425653594772, + "min_seq_length": 1819, + "max_seq_length": 1918, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 205, + "non_truncated": 514, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 267, + "mean_seq_length": 1882.730180806676, + "min_seq_length": 1479, + "max_seq_length": 2749, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.6286509040333796 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 162, + "non_truncated": 1267, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 170, + "mean_seq_length": 1803.6703988803358, + 
"min_seq_length": 1508, + "max_seq_length": 2796, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.881035689293212 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1875.5692307692307, + "min_seq_length": 1815, + "max_seq_length": 2009, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1568.8021428571428, + "min_seq_length": 1543, + "max_seq_length": 1844, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 2, + "non_truncated": 2193, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 2, + "mean_seq_length": 1567.8610478359908, + "min_seq_length": 1267, + "max_seq_length": 2116, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9990888382687926 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 851, + "non_truncated": 0, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 1689, + "mean_seq_length": 2138.0317273795536, + "min_seq_length": 2101, + "max_seq_length": 2172, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 23.015276145710928 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1809.4268656716417, + "min_seq_length": 1788, + "max_seq_length": 1870, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 851, - "non_truncated": 0, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 1689, - "mean_seq_length": 2138.0317273795536, - "min_seq_length": 2101, - "max_seq_length": 2172, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 23.015276145710928 + "config": { + "model": "huggingface", + "model_args": "pretrained=HuggingFaceTB/SmolLM-135M-Instruct,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1809.4268656716417, - "min_seq_length": 1788, - "max_seq_length": 1870, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=HuggingFaceTB/SmolLM-135M-Instruct,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - 
"use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/HuggingFaceTB/SmolLM-135M-Instruct/results_2024-07-24T14-22-33.098781.json b/HuggingFaceTB/SmolLM-135M-Instruct/results_2024-07-24T14-22-33.098781.json index 8f1f4f206468550f0d1dfbba877491cb36fcfb47..10e56d3a6974903e3c694343733fb23d16526625 100644 --- a/HuggingFaceTB/SmolLM-135M-Instruct/results_2024-07-24T14-22-33.098781.json +++ b/HuggingFaceTB/SmolLM-135M-Instruct/results_2024-07-24T14-22-33.098781.json @@ -34,29 +34,29 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.13002897391118137, - "all_grouped_npm": -0.3640932247152165, + "all_grouped_average": 0.16019839003154296, + "all_grouped_npm": -0.3066737359390134, "all_grouped": { "enem_challenge": 0.1728481455563331, "bluex": 0.1627260083449235, "oab_exams": 0.22369020501138953, - "assin2_rte": 0.26300477210440537, + "assin2_rte": 0.3945071581566081, "assin2_sts": 0.03362436337787243, - "faquad_nli": 0.07058169545364304, - "hatebr_offensive": 0.06650348469472238, - "portuguese_hate_speech": 0.07431443242652681, - "tweetsentbr": 0.10296765823081612 + "faquad_nli": 0.10587254318046457, + "hatebr_offensive": 0.09975522704208357, + "portuguese_hate_speech": 0.11147164863979023, + "tweetsentbr": 0.1372902109744215 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.1728481455563331, "harness|bluex|bluex|None|3": 0.1627260083449235, "harness|oab_exams|oab_exams|None|3": 0.22369020501138953, - "harness|assin2_rte|assin2_rte|None|15": 0.26300477210440537, + "harness|assin2_rte|assin2_rte|None|15": 0.3945071581566081, "harness|assin2_sts|assin2_sts|None|15": 0.03362436337787243, - "harness|faquad_nli|faquad_nli|None|15": 0.07058169545364304, - "harness|hatebr_offensive|hatebr_offensive|None|25": 0.06650348469472238, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.07431443242652681, - "harness|tweetsentbr|tweetsentbr|None|25": 0.10296765823081612 + "harness|faquad_nli|faquad_nli|None|15": 0.10587254318046457, + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.09975522704208357, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.11147164863979023, + "harness|tweetsentbr|tweetsentbr|None|25": 0.1372902109744215 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.1728481455563331, @@ -125,9 +125,9 @@ "main_score": 0.22369020501138953 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.26300477210440537, + "f1_macro,all": 0.3945071581566081, "acc,all": 0.2839052287581699, - "main_score": 0.26300477210440537 + "main_score": 0.3945071581566081 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.03362436337787243, @@ -135,24 +135,24 @@ "main_score": 0.03362436337787243 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.07058169545364304, + "f1_macro,all": 0.10587254318046457, "acc,all": 0.08461538461538462, - "main_score": 0.07058169545364304 + "main_score": 0.10587254318046457 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.06650348469472238, + "f1_macro,all": 0.09975522704208357, "acc,all": 0.05857142857142857, - "main_score": 0.06650348469472238 + "main_score": 0.09975522704208357 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.07431443242652681, + "f1_macro,all": 0.11147164863979023, "acc,all": 
0.07638072855464159, - "main_score": 0.07431443242652681 + "main_score": 0.11147164863979023 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.10296765823081612, + "f1_macro,all": 0.1372902109744215, "acc,all": 0.14378109452736318, - "main_score": 0.10296765823081612 + "main_score": 0.1372902109744215 } }, "config_tasks": { diff --git a/HuggingFaceTB/SmolLM-360M-Instruct/raw_2024-07-24T17-08-31.061263/results.json b/HuggingFaceTB/SmolLM-360M-Instruct/raw_2024-07-24T17-08-31.061263/results.json index 80ec9b26acb71c3ff96d7c8dbc75e73df7298f4b..032c1539743ca927baf6b7be5f96704b34847776 100644 --- a/HuggingFaceTB/SmolLM-360M-Instruct/raw_2024-07-24T17-08-31.061263/results.json +++ b/HuggingFaceTB/SmolLM-360M-Instruct/raw_2024-07-24T17-08-31.061263/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.3333333333333333, - "acc,all": 0.5, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.036062757273046704, - "mse,all": 2.5509232026143787, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.16689847009735745, - "acc,exam_id__USP_2018": 0.14814814814814814, - "acc,exam_id__USP_2021": 0.15384615384615385, - "acc,exam_id__UNICAMP_2018": 0.14814814814814814, - "acc,exam_id__UNICAMP_2020": 0.14545454545454545, - "acc,exam_id__UNICAMP_2022": 0.20512820512820512, - "acc,exam_id__UNICAMP_2024": 0.15555555555555556, - "acc,exam_id__USP_2019": 0.225, - "acc,exam_id__USP_2022": 0.22448979591836735, - "acc,exam_id__USP_2023": 0.045454545454545456, - "acc,exam_id__UNICAMP_2021_2": 0.11764705882352941, - "acc,exam_id__UNICAMP_2023": 0.3023255813953488, - "acc,exam_id__USP_2024": 0.07317073170731707, - "acc,exam_id__USP_2020": 0.17857142857142858, - "acc,exam_id__UNICAMP_2021_1": 0.2608695652173913, - "acc,exam_id__UNICAMP_2019": 0.14, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.17984604618614417, - "acc,exam_id__2014": 0.1926605504587156, - "acc,exam_id__2015": 0.13445378151260504, - "acc,exam_id__2013": 0.1574074074074074, - "acc,exam_id__2009": 0.14782608695652175, - "acc,exam_id__2022": 0.18796992481203006, - "acc,exam_id__2017": 0.19827586206896552, - "acc,exam_id__2010": 0.13675213675213677, - "acc,exam_id__2012": 0.1810344827586207, - "acc,exam_id__2016": 0.19834710743801653, - "acc,exam_id__2011": 0.1794871794871795, - "acc,exam_id__2016_2": 0.17886178861788618, - "acc,exam_id__2023": 0.2518518518518518 - }, - "faquad_nli": { - "f1_macro,all": 0.09262250942380183, - "acc,all": 0.13230769230769232, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.23490637157803498, - "acc,all": 0.3407142857142857 - }, - "oab_exams": { - "acc,all": 0.23006833712984054, - "acc,exam_id__2016-21": 0.2125, - "acc,exam_id__2018-25": 0.2875, - "acc,exam_id__2017-22": 0.25, - "acc,exam_id__2012-08": 0.225, - "acc,exam_id__2017-23": 0.2125, - "acc,exam_id__2013-12": 0.175, - "acc,exam_id__2011-05": 0.2375, - "acc,exam_id__2016-20": 0.225, - "acc,exam_id__2014-15": 0.21794871794871795, - "acc,exam_id__2012-06a": 0.2375, - "acc,exam_id__2016-20a": 0.3, - "acc,exam_id__2014-13": 0.2375, - "acc,exam_id__2013-10": 0.2125, - "acc,exam_id__2012-09": 0.23376623376623376, - "acc,exam_id__2015-16": 0.2375, - "acc,exam_id__2011-03": 0.24242424242424243, - "acc,exam_id__2013-11": 0.1625, - "acc,exam_id__2016-19": 0.19230769230769232, - "acc,exam_id__2014-14": 0.2625, - "acc,exam_id__2010-01": 0.25882352941176473, - "acc,exam_id__2012-07": 0.1375, - "acc,exam_id__2017-24": 
0.225, - "acc,exam_id__2015-18": 0.25, - "acc,exam_id__2011-04": 0.25, - "acc,exam_id__2012-06": 0.2375, - "acc,exam_id__2010-02": 0.24, - "acc,exam_id__2015-17": 0.24358974358974358, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.1945811604219462, - "acc,all": 0.20681551116333724 - }, - "tweetsentbr": { - "f1_macro,all": 0.12400834046013225, - "acc,all": 0.2975124378109453, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.3333333333333333, + "acc,all": 0.5, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.036062757273046704, + "mse,all": 2.5509232026143787, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.16689847009735745, + "acc,exam_id__USP_2018": 0.14814814814814814, + "acc,exam_id__USP_2021": 0.15384615384615385, + "acc,exam_id__UNICAMP_2018": 0.14814814814814814, + "acc,exam_id__UNICAMP_2020": 0.14545454545454545, + "acc,exam_id__UNICAMP_2022": 0.20512820512820512, + "acc,exam_id__UNICAMP_2024": 0.15555555555555556, + "acc,exam_id__USP_2019": 0.225, + "acc,exam_id__USP_2022": 0.22448979591836735, + "acc,exam_id__USP_2023": 0.045454545454545456, + "acc,exam_id__UNICAMP_2021_2": 0.11764705882352941, + "acc,exam_id__UNICAMP_2023": 0.3023255813953488, + "acc,exam_id__USP_2024": 0.07317073170731707, + "acc,exam_id__USP_2020": 0.17857142857142858, + "acc,exam_id__UNICAMP_2021_1": 0.2608695652173913, + "acc,exam_id__UNICAMP_2019": 0.14, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.17984604618614417, + "acc,exam_id__2014": 0.1926605504587156, + "acc,exam_id__2015": 0.13445378151260504, + "acc,exam_id__2013": 0.1574074074074074, + "acc,exam_id__2009": 0.14782608695652175, + "acc,exam_id__2022": 0.18796992481203006, + "acc,exam_id__2017": 0.19827586206896552, + "acc,exam_id__2010": 0.13675213675213677, + "acc,exam_id__2012": 0.1810344827586207, + "acc,exam_id__2016": 0.19834710743801653, + "acc,exam_id__2011": 0.1794871794871795, + "acc,exam_id__2016_2": 0.17886178861788618, + "acc,exam_id__2023": 0.2518518518518518 + }, + "faquad_nli": { + "f1_macro,all": 0.13893376413570277, + "acc,all": 0.13230769230769232, + "alias": "faquad_nli" + }, + 
"hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.3523595573670525, + "acc,all": 0.3407142857142857 + }, + "oab_exams": { + "acc,all": 0.23006833712984054, + "acc,exam_id__2016-21": 0.2125, + "acc,exam_id__2018-25": 0.2875, + "acc,exam_id__2017-22": 0.25, + "acc,exam_id__2012-08": 0.225, + "acc,exam_id__2017-23": 0.2125, + "acc,exam_id__2013-12": 0.175, + "acc,exam_id__2011-05": 0.2375, + "acc,exam_id__2016-20": 0.225, + "acc,exam_id__2014-15": 0.21794871794871795, + "acc,exam_id__2012-06a": 0.2375, + "acc,exam_id__2016-20a": 0.3, + "acc,exam_id__2014-13": 0.2375, + "acc,exam_id__2013-10": 0.2125, + "acc,exam_id__2012-09": 0.23376623376623376, + "acc,exam_id__2015-16": 0.2375, + "acc,exam_id__2011-03": 0.24242424242424243, + "acc,exam_id__2013-11": 0.1625, + "acc,exam_id__2016-19": 0.19230769230769232, + "acc,exam_id__2014-14": 0.2625, + "acc,exam_id__2010-01": 0.25882352941176473, + "acc,exam_id__2012-07": 0.1375, + "acc,exam_id__2017-24": 0.225, + "acc,exam_id__2015-18": 0.25, + "acc,exam_id__2011-04": 0.25, + "acc,exam_id__2012-06": 0.2375, + "acc,exam_id__2010-02": 0.24, + "acc,exam_id__2015-17": 0.24358974358974358, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.2918717406329194, + "acc,all": 0.20681551116333724 + }, + "tweetsentbr": { + "f1_macro,all": 0.16534445394684297, + "acc,all": 0.2975124378109453, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1220, - "non_truncated": 12930, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 2128, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "61ee8dc97f5b99b609255a3b6091cbc0023c7692", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 723646464, - "model_num_parameters": 361821120, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 1, - "max_length": 2048, - "max_ctx_length": 2016, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1618.1425653594772, - "min_seq_length": 1593, - "max_seq_length": 1692, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1844.1425653594772, - "min_seq_length": 1819, - "max_seq_length": 1918, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 205, - "non_truncated": 514, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 267, - "mean_seq_length": 1882.730180806676, - "min_seq_length": 1479, - "max_seq_length": 2749, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.6286509040333796 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 162, - "non_truncated": 1267, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 
170, - "mean_seq_length": 1803.6703988803358, - "min_seq_length": 1508, - "max_seq_length": 2796, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.881035689293212 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1875.5692307692307, - "min_seq_length": 1815, - "max_seq_length": 2009, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1220, + "non_truncated": 12930, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 2128, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "61ee8dc97f5b99b609255a3b6091cbc0023c7692", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 723646464, + "model_num_parameters": 361821120, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 1, + "max_length": 2048, + "max_ctx_length": 2016, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1568.8021428571428, - "min_seq_length": 1543, - "max_seq_length": 1844, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 2, - "non_truncated": 2193, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 2, - "mean_seq_length": 1567.8610478359908, - "min_seq_length": 1267, - "max_seq_length": 2116, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9990888382687926 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1618.1425653594772, + "min_seq_length": 1593, + "max_seq_length": 1692, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1844.1425653594772, + "min_seq_length": 1819, + "max_seq_length": 1918, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 205, + "non_truncated": 514, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 267, + "mean_seq_length": 1882.730180806676, + "min_seq_length": 1479, + "max_seq_length": 2749, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.6286509040333796 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 162, + "non_truncated": 1267, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 170, + "mean_seq_length": 1803.6703988803358, + 
"min_seq_length": 1508, + "max_seq_length": 2796, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.881035689293212 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1875.5692307692307, + "min_seq_length": 1815, + "max_seq_length": 2009, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1568.8021428571428, + "min_seq_length": 1543, + "max_seq_length": 1844, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 2, + "non_truncated": 2193, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 2, + "mean_seq_length": 1567.8610478359908, + "min_seq_length": 1267, + "max_seq_length": 2116, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9990888382687926 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 851, + "non_truncated": 0, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 1689, + "mean_seq_length": 2138.0317273795536, + "min_seq_length": 2101, + "max_seq_length": 2172, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 23.015276145710928 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1809.4268656716417, + "min_seq_length": 1788, + "max_seq_length": 1870, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 851, - "non_truncated": 0, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 1689, - "mean_seq_length": 2138.0317273795536, - "min_seq_length": 2101, - "max_seq_length": 2172, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 23.015276145710928 + "config": { + "model": "huggingface", + "model_args": "pretrained=HuggingFaceTB/SmolLM-360M-Instruct,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1809.4268656716417, - "min_seq_length": 1788, - "max_seq_length": 1870, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=HuggingFaceTB/SmolLM-360M-Instruct,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - 
"use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/HuggingFaceTB/SmolLM-360M-Instruct/results_2024-07-24T17-08-31.061263.json b/HuggingFaceTB/SmolLM-360M-Instruct/results_2024-07-24T17-08-31.061263.json index 5b10df18dda773ce20d03cca0fb28756e83bdb5f..57d7a02f9c0dc20b847ed25613490e0546949e1d 100644 --- a/HuggingFaceTB/SmolLM-360M-Instruct/results_2024-07-24T17-08-31.061263.json +++ b/HuggingFaceTB/SmolLM-360M-Instruct/results_2024-07-24T17-08-31.061263.json @@ -34,18 +34,18 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.1769252584337375, - "all_grouped_npm": -0.27462637422910463, + "all_grouped_average": 0.21052427334469329, + "all_grouped_npm": -0.21148330912467844, "all_grouped": { "enem_challenge": 0.17984604618614417, "bluex": 0.16689847009735745, "oab_exams": 0.23006833712984054, "assin2_rte": 0.3333333333333333, "assin2_sts": 0.036062757273046704, - "faquad_nli": 0.09262250942380183, - "hatebr_offensive": 0.23490637157803498, - "portuguese_hate_speech": 0.1945811604219462, - "tweetsentbr": 0.12400834046013225 + "faquad_nli": 0.13893376413570277, + "hatebr_offensive": 0.3523595573670525, + "portuguese_hate_speech": 0.2918717406329194, + "tweetsentbr": 0.16534445394684297 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.17984604618614417, @@ -53,10 +53,10 @@ "harness|oab_exams|oab_exams|None|3": 0.23006833712984054, "harness|assin2_rte|assin2_rte|None|15": 0.3333333333333333, "harness|assin2_sts|assin2_sts|None|15": 0.036062757273046704, - "harness|faquad_nli|faquad_nli|None|15": 0.09262250942380183, - "harness|hatebr_offensive|hatebr_offensive|None|25": 0.23490637157803498, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.1945811604219462, - "harness|tweetsentbr|tweetsentbr|None|25": 0.12400834046013225 + "harness|faquad_nli|faquad_nli|None|15": 0.13893376413570277, + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.3523595573670525, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.2918717406329194, + "harness|tweetsentbr|tweetsentbr|None|25": 0.16534445394684297 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.17984604618614417, @@ -135,24 +135,24 @@ "main_score": 0.036062757273046704 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.09262250942380183, + "f1_macro,all": 0.13893376413570277, "acc,all": 0.13230769230769232, - "main_score": 0.09262250942380183 + "main_score": 0.13893376413570277 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.23490637157803498, + "f1_macro,all": 0.3523595573670525, "acc,all": 0.3407142857142857, - "main_score": 0.23490637157803498 + "main_score": 0.3523595573670525 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.1945811604219462, + "f1_macro,all": 0.2918717406329194, "acc,all": 0.20681551116333724, - "main_score": 0.1945811604219462 + "main_score": 0.2918717406329194 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.12400834046013225, + "f1_macro,all": 0.16534445394684297, "acc,all": 0.2975124378109453, - "main_score": 0.12400834046013225 + "main_score": 0.16534445394684297 } }, "config_tasks": { diff --git a/Intel/neural-chat-7b-v3-1/raw_2024-02-25T06-21-33.008420/results.json b/Intel/neural-chat-7b-v3-1/raw_2024-02-25T06-21-33.008420/results.json index 
d5e81dc7b69bcf3bf61986db4000be4485b4effa..438c823dcfbf938795723fb6d887cf29001f643a 100644 --- a/Intel/neural-chat-7b-v3-1/raw_2024-02-25T06-21-33.008420/results.json +++ b/Intel/neural-chat-7b-v3-1/raw_2024-02-25T06-21-33.008420/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9268770228292367, - "acc,all": 0.9268790849673203, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7658477385894799, - "mse,all": 0.5124264705882353, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.47983310152990266, - "acc,exam_id__USP_2019": 0.4, - "acc,exam_id__USP_2022": 0.46938775510204084, - "acc,exam_id__USP_2023": 0.5909090909090909, - "acc,exam_id__UNICAMP_2018": 0.4074074074074074, - "acc,exam_id__UNICAMP_2019": 0.54, - "acc,exam_id__USP_2020": 0.48214285714285715, - "acc,exam_id__UNICAMP_2020": 0.43636363636363634, - "acc,exam_id__UNICAMP_2023": 0.5348837209302325, - "acc,exam_id__USP_2021": 0.4423076923076923, - "acc,exam_id__UNICAMP_2022": 0.5897435897435898, - "acc,exam_id__UNICAMP_2024": 0.5111111111111111, - "acc,exam_id__USP_2018": 0.37037037037037035, - "acc,exam_id__UNICAMP_2021_2": 0.45098039215686275, - "acc,exam_id__UNICAMP_2021_1": 0.45652173913043476, - "acc,exam_id__USP_2024": 0.5853658536585366, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6263121063680895, - "acc,exam_id__2013": 0.6388888888888888, - "acc,exam_id__2016_2": 0.6666666666666666, - "acc,exam_id__2016": 0.5619834710743802, - "acc,exam_id__2011": 0.6837606837606838, - "acc,exam_id__2017": 0.5948275862068966, - "acc,exam_id__2023": 0.6814814814814815, - "acc,exam_id__2014": 0.6513761467889908, - "acc,exam_id__2012": 0.6379310344827587, - "acc,exam_id__2009": 0.6, - "acc,exam_id__2015": 0.5966386554621849, - "acc,exam_id__2022": 0.6165413533834586, - "acc,exam_id__2010": 0.5811965811965812 - }, - "faquad_nli": { - "f1_macro,all": 0.7840135895978708, - "acc,all": 0.8353846153846154, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8905574366528357, - "acc,all": 0.8907142857142857 - }, - "oab_exams": { - "acc,all": 0.39726651480637815, - "acc,exam_id__2016-20a": 0.3875, - "acc,exam_id__2012-06": 0.4125, - "acc,exam_id__2015-18": 0.4, - "acc,exam_id__2014-14": 0.475, - "acc,exam_id__2012-07": 0.3875, - "acc,exam_id__2015-16": 0.3875, - "acc,exam_id__2011-05": 0.4375, - "acc,exam_id__2012-06a": 0.425, - "acc,exam_id__2017-23": 0.35, - "acc,exam_id__2016-19": 0.4230769230769231, - "acc,exam_id__2017-24": 0.35, - "acc,exam_id__2016-20": 0.375, - "acc,exam_id__2017-22": 0.475, - "acc,exam_id__2013-12": 0.45, - "acc,exam_id__2010-02": 0.36, - "acc,exam_id__2011-03": 0.3333333333333333, - "acc,exam_id__2012-08": 0.425, - "acc,exam_id__2013-10": 0.4125, - "acc,exam_id__2016-21": 0.4125, - "acc,exam_id__2014-15": 0.44871794871794873, - "acc,exam_id__2018-25": 0.425, - "acc,exam_id__2014-13": 0.275, - "acc,exam_id__2010-01": 0.3764705882352941, - "acc,exam_id__2015-17": 0.5, - "acc,exam_id__2013-11": 0.4, - "acc,exam_id__2011-04": 0.3125, - "acc,exam_id__2012-09": 0.33766233766233766, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6685671281654837, - "acc,all": 0.6874265569917744 - }, - "tweetsentbr": { - "f1_macro,all": 0.5145983702206705, - "acc,all": 0.7114427860696517, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], 
- "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9268770228292367, + "acc,all": 0.9268790849673203, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7658477385894799, + "mse,all": 0.5124264705882353, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.47983310152990266, + "acc,exam_id__USP_2019": 0.4, + "acc,exam_id__USP_2022": 0.46938775510204084, + "acc,exam_id__USP_2023": 0.5909090909090909, + "acc,exam_id__UNICAMP_2018": 0.4074074074074074, + "acc,exam_id__UNICAMP_2019": 0.54, + "acc,exam_id__USP_2020": 0.48214285714285715, + "acc,exam_id__UNICAMP_2020": 0.43636363636363634, + "acc,exam_id__UNICAMP_2023": 0.5348837209302325, + "acc,exam_id__USP_2021": 0.4423076923076923, + "acc,exam_id__UNICAMP_2022": 0.5897435897435898, + "acc,exam_id__UNICAMP_2024": 0.5111111111111111, + "acc,exam_id__USP_2018": 0.37037037037037035, + "acc,exam_id__UNICAMP_2021_2": 0.45098039215686275, + "acc,exam_id__UNICAMP_2021_1": 0.45652173913043476, + "acc,exam_id__USP_2024": 0.5853658536585366, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6263121063680895, + "acc,exam_id__2013": 0.6388888888888888, + "acc,exam_id__2016_2": 0.6666666666666666, + "acc,exam_id__2016": 0.5619834710743802, + "acc,exam_id__2011": 0.6837606837606838, + "acc,exam_id__2017": 0.5948275862068966, + "acc,exam_id__2023": 0.6814814814814815, + "acc,exam_id__2014": 0.6513761467889908, + "acc,exam_id__2012": 0.6379310344827587, + "acc,exam_id__2009": 0.6, + "acc,exam_id__2015": 0.5966386554621849, + "acc,exam_id__2022": 0.6165413533834586, + "acc,exam_id__2010": 0.5811965811965812 + }, + "faquad_nli": { + "f1_macro,all": 0.7840135895978708, + "acc,all": 0.8353846153846154, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8905574366528357, + "acc,all": 0.8907142857142857 + }, + "oab_exams": { + "acc,all": 0.39726651480637815, + "acc,exam_id__2016-20a": 0.3875, + "acc,exam_id__2012-06": 0.4125, + "acc,exam_id__2015-18": 0.4, + "acc,exam_id__2014-14": 0.475, + "acc,exam_id__2012-07": 0.3875, + "acc,exam_id__2015-16": 0.3875, + "acc,exam_id__2011-05": 0.4375, + "acc,exam_id__2012-06a": 0.425, + "acc,exam_id__2017-23": 0.35, + "acc,exam_id__2016-19": 0.4230769230769231, + "acc,exam_id__2017-24": 0.35, + "acc,exam_id__2016-20": 0.375, + 
"acc,exam_id__2017-22": 0.475, + "acc,exam_id__2013-12": 0.45, + "acc,exam_id__2010-02": 0.36, + "acc,exam_id__2011-03": 0.3333333333333333, + "acc,exam_id__2012-08": 0.425, + "acc,exam_id__2013-10": 0.4125, + "acc,exam_id__2016-21": 0.4125, + "acc,exam_id__2014-15": 0.44871794871794873, + "acc,exam_id__2018-25": 0.425, + "acc,exam_id__2014-13": 0.275, + "acc,exam_id__2010-01": 0.3764705882352941, + "acc,exam_id__2015-17": 0.5, + "acc,exam_id__2013-11": 0.4, + "acc,exam_id__2011-04": 0.3125, + "acc,exam_id__2012-09": 0.33766233766233766, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6685671281654837, + "acc,all": 0.6874265569917744 + }, + "tweetsentbr": { + "f1_macro,all": 0.6861311602942273, + "acc,all": 0.7114427860696517, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "e852bc2e78a3fe509ec28c6d76512df3012acba7", - 
"model_dtype": "torch.float16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 4096, - "max_ctx_length": 4064, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1620.039188243527, - "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "e852bc2e78a3fe509ec28c6d76512df3012acba7", + "model_dtype": "torch.float16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + 
"max_length": 4096, + "max_ctx_length": 4064, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + 
"fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Intel/neural-chat-7b-v3-1,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Intel/neural-chat-7b-v3-1,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "804df15" + "git_hash": "804df15" } \ No newline at end of file diff --git a/Intel/neural-chat-7b-v3-1/results_2024-02-25T06-21-33.008420.json b/Intel/neural-chat-7b-v3-1/results_2024-02-25T06-21-33.008420.json index 472c5a6b19b5cd2592d61adfce4fd296c533be11..a249602d1eba4eaea7536cc70ca1345c49bad95e 100644 --- a/Intel/neural-chat-7b-v3-1/results_2024-02-25T06-21-33.008420.json +++ b/Intel/neural-chat-7b-v3-1/results_2024-02-25T06-21-33.008420.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6726525565288831, - "all_grouped_npm": 0.5225859783991329, + "all_grouped_average": 0.691711755425945, + "all_grouped_npm": 0.5509478815197606, "all_grouped": { "enem_challenge": 0.6263121063680895, "bluex": 0.47983310152990266, @@ -45,7 +45,7 @@ "faquad_nli": 0.7840135895978708, "hatebr_offensive": 0.8905574366528357, "portuguese_hate_speech": 0.6685671281654837, - "tweetsentbr": 0.5145983702206705 + "tweetsentbr": 0.6861311602942273 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6263121063680895, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7840135895978708, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8905574366528357, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6685671281654837, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5145983702206705 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6861311602942273 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6263121063680895, @@ -150,9 +150,9 @@ "main_score": 0.6685671281654837 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5145983702206705, + "f1_macro,all": 0.6861311602942273, "acc,all": 0.7114427860696517, - 
"main_score": 0.5145983702206705 + "main_score": 0.6861311602942273 } }, "config_tasks": { diff --git a/Intel/neural-chat-7b-v3-3/raw_2024-02-21T22-54-50.520595/results.json b/Intel/neural-chat-7b-v3-3/raw_2024-02-21T22-54-50.520595/results.json index 05cb5d4da227d079f059230c62e42e9e43c9c463..1dbd60a181a8a2c2c5a1ad45dc9ca4a5f52fe4a6 100644 --- a/Intel/neural-chat-7b-v3-3/raw_2024-02-21T22-54-50.520595/results.json +++ b/Intel/neural-chat-7b-v3-3/raw_2024-02-21T22-54-50.520595/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9140545431322211, - "acc,all": 0.9142156862745098, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7587721518241414, - "mse,all": 0.5857066993464052, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5034770514603616, - "acc,exam_id__USP_2021": 0.4423076923076923, - "acc,exam_id__UNICAMP_2021_2": 0.47058823529411764, - "acc,exam_id__UNICAMP_2023": 0.6046511627906976, - "acc,exam_id__UNICAMP_2021_1": 0.43478260869565216, - "acc,exam_id__USP_2024": 0.6585365853658537, - "acc,exam_id__UNICAMP_2018": 0.46296296296296297, - "acc,exam_id__USP_2022": 0.42857142857142855, - "acc,exam_id__UNICAMP_2020": 0.45454545454545453, - "acc,exam_id__USP_2018": 0.3888888888888889, - "acc,exam_id__USP_2020": 0.5535714285714286, - "acc,exam_id__UNICAMP_2022": 0.5641025641025641, - "acc,exam_id__USP_2019": 0.475, - "acc,exam_id__USP_2023": 0.6590909090909091, - "acc,exam_id__UNICAMP_2019": 0.58, - "acc,exam_id__UNICAMP_2024": 0.4444444444444444, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6263121063680895, - "acc,exam_id__2016_2": 0.6504065040650406, - "acc,exam_id__2009": 0.6173913043478261, - "acc,exam_id__2011": 0.7008547008547008, - "acc,exam_id__2012": 0.6120689655172413, - "acc,exam_id__2013": 0.6111111111111112, - "acc,exam_id__2016": 0.5867768595041323, - "acc,exam_id__2022": 0.6390977443609023, - "acc,exam_id__2023": 0.6592592592592592, - "acc,exam_id__2010": 0.5982905982905983, - "acc,exam_id__2014": 0.6330275229357798, - "acc,exam_id__2015": 0.6050420168067226, - "acc,exam_id__2017": 0.5948275862068966 - }, - "faquad_nli": { - "f1_macro,all": 0.7147222222222223, - "acc,all": 0.7569230769230769, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8653967318817455, - "acc,all": 0.8657142857142858 - }, - "oab_exams": { - "acc,all": 0.39635535307517084, - "acc,exam_id__2010-02": 0.43, - "acc,exam_id__2016-19": 0.46153846153846156, - "acc,exam_id__2015-17": 0.47435897435897434, - "acc,exam_id__2016-21": 0.3625, - "acc,exam_id__2017-24": 0.3625, - "acc,exam_id__2012-09": 0.3116883116883117, - "acc,exam_id__2011-04": 0.3625, - "acc,exam_id__2017-23": 0.3875, - "acc,exam_id__2011-03": 0.36363636363636365, - "acc,exam_id__2012-07": 0.35, - "acc,exam_id__2012-06": 0.4625, - "acc,exam_id__2014-13": 0.3, - "acc,exam_id__2016-20a": 0.4375, - "acc,exam_id__2011-05": 0.425, - "acc,exam_id__2015-18": 0.3875, - "acc,exam_id__2014-15": 0.41025641025641024, - "acc,exam_id__2018-25": 0.4625, - "acc,exam_id__2017-22": 0.475, - "acc,exam_id__2013-11": 0.4375, - "acc,exam_id__2014-14": 0.425, - "acc,exam_id__2013-10": 0.425, - "acc,exam_id__2010-01": 0.3176470588235294, - "acc,exam_id__2013-12": 0.4125, - "acc,exam_id__2015-16": 0.2875, - "acc,exam_id__2012-06a": 0.4375, - "acc,exam_id__2016-20": 0.4125, - "acc,exam_id__2012-08": 0.325, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - 
"f1_macro,all": 0.6322323153577603, - "acc,all": 0.6404230317273796 - }, - "tweetsentbr": { - "f1_macro,all": 0.4689260995001763, - "acc,all": 0.6925373134328359, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9140545431322211, + "acc,all": 0.9142156862745098, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7587721518241414, + "mse,all": 0.5857066993464052, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5034770514603616, + "acc,exam_id__USP_2021": 0.4423076923076923, + "acc,exam_id__UNICAMP_2021_2": 0.47058823529411764, + "acc,exam_id__UNICAMP_2023": 0.6046511627906976, + "acc,exam_id__UNICAMP_2021_1": 0.43478260869565216, + "acc,exam_id__USP_2024": 0.6585365853658537, + "acc,exam_id__UNICAMP_2018": 0.46296296296296297, + "acc,exam_id__USP_2022": 0.42857142857142855, + "acc,exam_id__UNICAMP_2020": 0.45454545454545453, + "acc,exam_id__USP_2018": 0.3888888888888889, + "acc,exam_id__USP_2020": 0.5535714285714286, + "acc,exam_id__UNICAMP_2022": 0.5641025641025641, + "acc,exam_id__USP_2019": 0.475, + "acc,exam_id__USP_2023": 0.6590909090909091, + "acc,exam_id__UNICAMP_2019": 0.58, + "acc,exam_id__UNICAMP_2024": 0.4444444444444444, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6263121063680895, + "acc,exam_id__2016_2": 0.6504065040650406, + "acc,exam_id__2009": 0.6173913043478261, + "acc,exam_id__2011": 0.7008547008547008, + "acc,exam_id__2012": 0.6120689655172413, + "acc,exam_id__2013": 0.6111111111111112, + "acc,exam_id__2016": 0.5867768595041323, + "acc,exam_id__2022": 0.6390977443609023, + "acc,exam_id__2023": 0.6592592592592592, + "acc,exam_id__2010": 0.5982905982905983, + "acc,exam_id__2014": 0.6330275229357798, + "acc,exam_id__2015": 0.6050420168067226, + "acc,exam_id__2017": 0.5948275862068966 + }, + "faquad_nli": { + "f1_macro,all": 0.7147222222222223, + "acc,all": 0.7569230769230769, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8653967318817455, + "acc,all": 0.8657142857142858 + }, + "oab_exams": { + "acc,all": 0.39635535307517084, + "acc,exam_id__2010-02": 0.43, + "acc,exam_id__2016-19": 0.46153846153846156, + "acc,exam_id__2015-17": 
0.47435897435897434, + "acc,exam_id__2016-21": 0.3625, + "acc,exam_id__2017-24": 0.3625, + "acc,exam_id__2012-09": 0.3116883116883117, + "acc,exam_id__2011-04": 0.3625, + "acc,exam_id__2017-23": 0.3875, + "acc,exam_id__2011-03": 0.36363636363636365, + "acc,exam_id__2012-07": 0.35, + "acc,exam_id__2012-06": 0.4625, + "acc,exam_id__2014-13": 0.3, + "acc,exam_id__2016-20a": 0.4375, + "acc,exam_id__2011-05": 0.425, + "acc,exam_id__2015-18": 0.3875, + "acc,exam_id__2014-15": 0.41025641025641024, + "acc,exam_id__2018-25": 0.4625, + "acc,exam_id__2017-22": 0.475, + "acc,exam_id__2013-11": 0.4375, + "acc,exam_id__2014-14": 0.425, + "acc,exam_id__2013-10": 0.425, + "acc,exam_id__2010-01": 0.3176470588235294, + "acc,exam_id__2013-12": 0.4125, + "acc,exam_id__2015-16": 0.2875, + "acc,exam_id__2012-06a": 0.4375, + "acc,exam_id__2016-20": 0.4125, + "acc,exam_id__2012-08": 0.325, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6322323153577603, + "acc,all": 0.6404230317273796 + }, + "tweetsentbr": { + "f1_macro,all": 0.6252347993335684, + "acc,all": 0.6925373134328359, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "cdce282d00962dcc4dc317fc5786b332d370a6d4", - 
"model_dtype": "torch.float16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 4096, - "max_ctx_length": 4064, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1620.039188243527, - "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "cdce282d00962dcc4dc317fc5786b332d370a6d4", + "model_dtype": "torch.float16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + 
"max_length": 4096, + "max_ctx_length": 4064, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + 
"fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Intel/neural-chat-7b-v3-3,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Intel/neural-chat-7b-v3-3,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "804df15" + "git_hash": "804df15" } \ No newline at end of file diff --git a/Intel/neural-chat-7b-v3-3/results_2024-02-21T22-54-50.520595.json b/Intel/neural-chat-7b-v3-3/results_2024-02-21T22-54-50.520595.json index b70745e11b816cc88686076665c554a4645895b6..096172a8f6a046bc2ca4560edcef308d44e597a8 100644 --- a/Intel/neural-chat-7b-v3-3/results_2024-02-21T22-54-50.520595.json +++ b/Intel/neural-chat-7b-v3-3/results_2024-02-21T22-54-50.520595.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6533609527579876, - "all_grouped_npm": 0.4871606934211402, + "all_grouped_average": 0.6707285860728089, + "all_grouped_npm": 0.5130053858539101, "all_grouped": { "enem_challenge": 0.6263121063680895, "bluex": 0.5034770514603616, @@ -45,7 +45,7 @@ "faquad_nli": 0.7147222222222223, "hatebr_offensive": 0.8653967318817455, "portuguese_hate_speech": 0.6322323153577603, - "tweetsentbr": 0.4689260995001763 + "tweetsentbr": 0.6252347993335684 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6263121063680895, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7147222222222223, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8653967318817455, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6322323153577603, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4689260995001763 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6252347993335684 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6263121063680895, @@ -150,9 +150,9 @@ "main_score": 0.6322323153577603 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4689260995001763, + "f1_macro,all": 0.6252347993335684, "acc,all": 0.6925373134328359, - 
"main_score": 0.4689260995001763 + "main_score": 0.6252347993335684 } }, "config_tasks": { diff --git a/JJhooww/Mistral_Relora_Step2k/raw_2024-03-09T08-42-21.029909/results.json b/JJhooww/Mistral_Relora_Step2k/raw_2024-03-09T08-42-21.029909/results.json index 61d54077bfd79d4862a296515b5b438407c79028..b2fe3e491b3290dfd226dbb72fe2a6f9afd7fdd0 100644 --- a/JJhooww/Mistral_Relora_Step2k/raw_2024-03-09T08-42-21.029909/results.json +++ b/JJhooww/Mistral_Relora_Step2k/raw_2024-03-09T08-42-21.029909/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9113496854193482, - "acc,all": 0.9113562091503268, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7074610038971542, - "mse,all": 0.7742524509803921, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5257301808066759, - "acc,exam_id__USP_2019": 0.5, - "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, - "acc,exam_id__UNICAMP_2020": 0.4909090909090909, - "acc,exam_id__UNICAMP_2024": 0.4666666666666667, - "acc,exam_id__UNICAMP_2019": 0.58, - "acc,exam_id__USP_2020": 0.5714285714285714, - "acc,exam_id__UNICAMP_2022": 0.6153846153846154, - "acc,exam_id__USP_2024": 0.7073170731707317, - "acc,exam_id__UNICAMP_2018": 0.4074074074074074, - "acc,exam_id__USP_2022": 0.4489795918367347, - "acc,exam_id__UNICAMP_2023": 0.627906976744186, - "acc,exam_id__USP_2021": 0.4807692307692308, - "acc,exam_id__USP_2023": 0.5909090909090909, - "acc,exam_id__USP_2018": 0.4444444444444444, - "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.615815255423373, - "acc,exam_id__2011": 0.6837606837606838, - "acc,exam_id__2014": 0.6513761467889908, - "acc,exam_id__2012": 0.5948275862068966, - "acc,exam_id__2016_2": 0.6097560975609756, - "acc,exam_id__2013": 0.6296296296296297, - "acc,exam_id__2009": 0.6, - "acc,exam_id__2022": 0.5939849624060151, - "acc,exam_id__2010": 0.5897435897435898, - "acc,exam_id__2017": 0.6206896551724138, - "acc,exam_id__2023": 0.6592592592592592, - "acc,exam_id__2015": 0.5630252100840336, - "acc,exam_id__2016": 0.5950413223140496 - }, - "faquad_nli": { - "f1_macro,all": 0.6526577185427341, - "acc,all": 0.816923076923077, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8133973664850924, - "acc,all": 0.8157142857142857 - }, - "oab_exams": { - "acc,all": 0.3981776765375854, - "acc,exam_id__2016-20a": 0.325, - "acc,exam_id__2012-07": 0.4125, - "acc,exam_id__2015-16": 0.3375, - "acc,exam_id__2010-01": 0.3176470588235294, - "acc,exam_id__2016-21": 0.3625, - "acc,exam_id__2017-23": 0.3625, - "acc,exam_id__2012-08": 0.3875, - "acc,exam_id__2012-06": 0.5125, - "acc,exam_id__2014-15": 0.46153846153846156, - "acc,exam_id__2011-04": 0.3125, - "acc,exam_id__2014-13": 0.3375, - "acc,exam_id__2013-12": 0.425, - "acc,exam_id__2014-14": 0.5125, - "acc,exam_id__2017-22": 0.5375, - "acc,exam_id__2013-10": 0.35, - "acc,exam_id__2018-25": 0.3875, - "acc,exam_id__2012-06a": 0.425, - "acc,exam_id__2016-19": 0.47435897435897434, - "acc,exam_id__2017-24": 0.3875, - "acc,exam_id__2011-05": 0.35, - "acc,exam_id__2015-17": 0.44871794871794873, - "acc,exam_id__2011-03": 0.35353535353535354, - "acc,exam_id__2010-02": 0.42, - "acc,exam_id__2013-11": 0.45, - "acc,exam_id__2012-09": 0.35064935064935066, - "acc,exam_id__2016-20": 0.4, - "acc,exam_id__2015-18": 0.3625, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 
0.6536416538696902, - "acc,all": 0.6686251468860165 - }, - "tweetsentbr": { - "f1_macro,all": 0.5193585604823832, - "acc,all": 0.7074626865671642, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9113496854193482, + "acc,all": 0.9113562091503268, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7074610038971542, + "mse,all": 0.7742524509803921, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5257301808066759, + "acc,exam_id__USP_2019": 0.5, + "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, + "acc,exam_id__UNICAMP_2020": 0.4909090909090909, + "acc,exam_id__UNICAMP_2024": 0.4666666666666667, + "acc,exam_id__UNICAMP_2019": 0.58, + "acc,exam_id__USP_2020": 0.5714285714285714, + "acc,exam_id__UNICAMP_2022": 0.6153846153846154, + "acc,exam_id__USP_2024": 0.7073170731707317, + "acc,exam_id__UNICAMP_2018": 0.4074074074074074, + "acc,exam_id__USP_2022": 0.4489795918367347, + "acc,exam_id__UNICAMP_2023": 0.627906976744186, + "acc,exam_id__USP_2021": 0.4807692307692308, + "acc,exam_id__USP_2023": 0.5909090909090909, + "acc,exam_id__USP_2018": 0.4444444444444444, + "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.615815255423373, + "acc,exam_id__2011": 0.6837606837606838, + "acc,exam_id__2014": 0.6513761467889908, + "acc,exam_id__2012": 0.5948275862068966, + "acc,exam_id__2016_2": 0.6097560975609756, + "acc,exam_id__2013": 0.6296296296296297, + "acc,exam_id__2009": 0.6, + "acc,exam_id__2022": 0.5939849624060151, + "acc,exam_id__2010": 0.5897435897435898, + "acc,exam_id__2017": 0.6206896551724138, + "acc,exam_id__2023": 0.6592592592592592, + "acc,exam_id__2015": 0.5630252100840336, + "acc,exam_id__2016": 0.5950413223140496 + }, + "faquad_nli": { + "f1_macro,all": 0.6526577185427341, + "acc,all": 0.816923076923077, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8133973664850924, + "acc,all": 0.8157142857142857 + }, + "oab_exams": { + "acc,all": 0.3981776765375854, + "acc,exam_id__2016-20a": 0.325, + "acc,exam_id__2012-07": 0.4125, + "acc,exam_id__2015-16": 0.3375, + "acc,exam_id__2010-01": 0.3176470588235294, + 
"acc,exam_id__2016-21": 0.3625, + "acc,exam_id__2017-23": 0.3625, + "acc,exam_id__2012-08": 0.3875, + "acc,exam_id__2012-06": 0.5125, + "acc,exam_id__2014-15": 0.46153846153846156, + "acc,exam_id__2011-04": 0.3125, + "acc,exam_id__2014-13": 0.3375, + "acc,exam_id__2013-12": 0.425, + "acc,exam_id__2014-14": 0.5125, + "acc,exam_id__2017-22": 0.5375, + "acc,exam_id__2013-10": 0.35, + "acc,exam_id__2018-25": 0.3875, + "acc,exam_id__2012-06a": 0.425, + "acc,exam_id__2016-19": 0.47435897435897434, + "acc,exam_id__2017-24": 0.3875, + "acc,exam_id__2011-05": 0.35, + "acc,exam_id__2015-17": 0.44871794871794873, + "acc,exam_id__2011-03": 0.35353535353535354, + "acc,exam_id__2010-02": 0.42, + "acc,exam_id__2013-11": 0.45, + "acc,exam_id__2012-09": 0.35064935064935066, + "acc,exam_id__2016-20": 0.4, + "acc,exam_id__2015-18": 0.3625, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6536416538696902, + "acc,all": 0.6686251468860165 + }, + "tweetsentbr": { + "f1_macro,all": 0.6924780806431777, + "acc,all": 0.7074626865671642, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 3, - "non_truncated": 14147, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 3, - "has_chat_template": true, - "chat_type": "user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"3bd728425d680d1f0472b1cdc553f036bfc90c48", - "model_dtype": "torch.float16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:1", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1451.7455065359477, - "min_seq_length": 1428, - "max_seq_length": 1518, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1675.7455065359477, - "min_seq_length": 1652, - "max_seq_length": 1742, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 1, - "non_truncated": 718, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 1, - "mean_seq_length": 1744.9262865090404, - "min_seq_length": 1368, - "max_seq_length": 2545, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998609179415855 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - "mean_seq_length": 1645.039188243527, - "min_seq_length": 1379, - "max_seq_length": 2643, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1691.9876923076922, - "min_seq_length": 1636, - "max_seq_length": 1812, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1462.3878571428572, - "min_seq_length": 1439, - "max_seq_length": 1713, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 3, + "non_truncated": 14147, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 3, + "has_chat_template": true, + "chat_type": "user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "3bd728425d680d1f0472b1cdc553f036bfc90c48", + "model_dtype": "torch.float16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + 
"model_is_quantized": null, + "model_device": "cuda:1", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1390.764464692483, - "min_seq_length": 1124, - "max_seq_length": 1893, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1451.7455065359477, + "min_seq_length": 1428, + "max_seq_length": 1518, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1675.7455065359477, + "min_seq_length": 1652, + "max_seq_length": 1742, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 1, + "non_truncated": 718, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 1, + "mean_seq_length": 1744.9262865090404, + "min_seq_length": 1368, + "max_seq_length": 2545, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998609179415855 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1645.039188243527, + "min_seq_length": 1379, + "max_seq_length": 2643, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1691.9876923076922, + "min_seq_length": 1636, + "max_seq_length": 1812, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1462.3878571428572, + "min_seq_length": 1439, + "max_seq_length": 1713, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1390.764464692483, + "min_seq_length": 1124, + "max_seq_length": 1893, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1963.3360752056403, + "min_seq_length": 1928, + "max_seq_length": 2002, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + 
"sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1709.2492537313433, + "min_seq_length": 1688, + "max_seq_length": 1804, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1963.3360752056403, - "min_seq_length": 1928, - "max_seq_length": 2002, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=JJhooww/Mistral_Relora_Step2k,dtype=float16,device=cuda:1,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1709.2492537313433, - "min_seq_length": 1688, - "max_seq_length": 1804, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=JJhooww/Mistral_Relora_Step2k,dtype=float16,device=cuda:1,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": null + "git_hash": null } \ No newline at end of file diff --git a/JJhooww/Mistral_Relora_Step2k/raw_2024-04-20T03-09-26.234801/results.json b/JJhooww/Mistral_Relora_Step2k/raw_2024-04-20T03-09-26.234801/results.json index d90ec67b1ccd72b72ca90beab41baa4406f08de2..dac1e262d1bc4de1057e2d16e129bf55684df9bf 100644 --- a/JJhooww/Mistral_Relora_Step2k/raw_2024-04-20T03-09-26.234801/results.json +++ b/JJhooww/Mistral_Relora_Step2k/raw_2024-04-20T03-09-26.234801/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9121625173669783, - "acc,all": 0.9121732026143791, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7065946896645577, - "mse,all": 0.7988970588235295, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5159944367176634, - "acc,exam_id__USP_2023": 0.5909090909090909, - "acc,exam_id__UNICAMP_2019": 0.52, - "acc,exam_id__UNICAMP_2023": 0.627906976744186, - "acc,exam_id__UNICAMP_2022": 0.6153846153846154, - "acc,exam_id__UNICAMP_2024": 0.4666666666666667, - "acc,exam_id__USP_2018": 0.4444444444444444, - "acc,exam_id__USP_2022": 0.4489795918367347, - "acc,exam_id__USP_2020": 0.5357142857142857, - "acc,exam_id__USP_2019": 0.525, - "acc,exam_id__USP_2024": 0.7073170731707317, - "acc,exam_id__UNICAMP_2018": 0.3888888888888889, - "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, - "acc,exam_id__UNICAMP_2020": 0.45454545454545453, - "acc,exam_id__USP_2021": 0.4807692307692308, - "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6179146256123164, - 
"acc,exam_id__2014": 0.6513761467889908, - "acc,exam_id__2017": 0.6206896551724138, - "acc,exam_id__2009": 0.6086956521739131, - "acc,exam_id__2016": 0.6033057851239669, - "acc,exam_id__2013": 0.6203703703703703, - "acc,exam_id__2015": 0.5630252100840336, - "acc,exam_id__2016_2": 0.6178861788617886, - "acc,exam_id__2023": 0.6518518518518519, - "acc,exam_id__2011": 0.6837606837606838, - "acc,exam_id__2022": 0.5939849624060151, - "acc,exam_id__2012": 0.6120689655172413, - "acc,exam_id__2010": 0.5897435897435898 - }, - "faquad_nli": { - "f1_macro,all": 0.6466313961043266, - "acc,all": 0.8107692307692308, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8143254279726638, - "acc,all": 0.8164285714285714 - }, - "oab_exams": { - "acc,all": 0.39635535307517084, - "acc,exam_id__2012-06a": 0.425, - "acc,exam_id__2013-10": 0.35, - "acc,exam_id__2010-02": 0.42, - "acc,exam_id__2016-21": 0.35, - "acc,exam_id__2014-13": 0.35, - "acc,exam_id__2014-15": 0.46153846153846156, - "acc,exam_id__2017-24": 0.3625, - "acc,exam_id__2016-20": 0.4, - "acc,exam_id__2018-25": 0.3875, - "acc,exam_id__2010-01": 0.3176470588235294, - "acc,exam_id__2015-16": 0.3375, - "acc,exam_id__2016-20a": 0.325, - "acc,exam_id__2011-05": 0.35, - "acc,exam_id__2013-11": 0.45, - "acc,exam_id__2013-12": 0.425, - "acc,exam_id__2014-14": 0.5, - "acc,exam_id__2016-19": 0.47435897435897434, - "acc,exam_id__2015-18": 0.375, - "acc,exam_id__2017-23": 0.3625, - "acc,exam_id__2015-17": 0.44871794871794873, - "acc,exam_id__2011-04": 0.3375, - "acc,exam_id__2011-03": 0.3434343434343434, - "acc,exam_id__2012-06": 0.5, - "acc,exam_id__2012-08": 0.3625, - "acc,exam_id__2017-22": 0.5375, - "acc,exam_id__2012-07": 0.4125, - "acc,exam_id__2012-09": 0.35064935064935066, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.652940879778074, - "acc,all": 0.6674500587544065 - }, - "tweetsentbr": { - "f1_macro,all": 0.5167597069914197, - "acc,all": 0.6950248756218905, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9121625173669783, + "acc,all": 0.9121732026143791, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7065946896645577, + "mse,all": 0.7988970588235295, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5159944367176634, + "acc,exam_id__USP_2023": 0.5909090909090909, + "acc,exam_id__UNICAMP_2019": 0.52, + "acc,exam_id__UNICAMP_2023": 0.627906976744186, + "acc,exam_id__UNICAMP_2022": 0.6153846153846154, + "acc,exam_id__UNICAMP_2024": 0.4666666666666667, + "acc,exam_id__USP_2018": 0.4444444444444444, + "acc,exam_id__USP_2022": 0.4489795918367347, + "acc,exam_id__USP_2020": 0.5357142857142857, + "acc,exam_id__USP_2019": 0.525, + "acc,exam_id__USP_2024": 0.7073170731707317, + "acc,exam_id__UNICAMP_2018": 0.3888888888888889, + "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, + "acc,exam_id__UNICAMP_2020": 0.45454545454545453, + "acc,exam_id__USP_2021": 0.4807692307692308, + "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6179146256123164, + "acc,exam_id__2014": 0.6513761467889908, + "acc,exam_id__2017": 0.6206896551724138, + "acc,exam_id__2009": 0.6086956521739131, + "acc,exam_id__2016": 0.6033057851239669, + "acc,exam_id__2013": 0.6203703703703703, + "acc,exam_id__2015": 0.5630252100840336, + "acc,exam_id__2016_2": 0.6178861788617886, + "acc,exam_id__2023": 0.6518518518518519, + "acc,exam_id__2011": 0.6837606837606838, + "acc,exam_id__2022": 0.5939849624060151, + "acc,exam_id__2012": 0.6120689655172413, + "acc,exam_id__2010": 0.5897435897435898 + }, + "faquad_nli": { + "f1_macro,all": 0.6466313961043266, + "acc,all": 0.8107692307692308, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8143254279726638, + "acc,all": 0.8164285714285714 + }, + "oab_exams": { + "acc,all": 0.39635535307517084, + "acc,exam_id__2012-06a": 0.425, + "acc,exam_id__2013-10": 0.35, + "acc,exam_id__2010-02": 0.42, + "acc,exam_id__2016-21": 0.35, + "acc,exam_id__2014-13": 0.35, + "acc,exam_id__2014-15": 0.46153846153846156, + "acc,exam_id__2017-24": 0.3625, + "acc,exam_id__2016-20": 0.4, + "acc,exam_id__2018-25": 0.3875, + "acc,exam_id__2010-01": 0.3176470588235294, + "acc,exam_id__2015-16": 0.3375, + "acc,exam_id__2016-20a": 0.325, + "acc,exam_id__2011-05": 0.35, + "acc,exam_id__2013-11": 0.45, + "acc,exam_id__2013-12": 0.425, + "acc,exam_id__2014-14": 0.5, + "acc,exam_id__2016-19": 0.47435897435897434, + "acc,exam_id__2015-18": 0.375, + "acc,exam_id__2017-23": 0.3625, + "acc,exam_id__2015-17": 0.44871794871794873, + "acc,exam_id__2011-04": 0.3375, + 
"acc,exam_id__2011-03": 0.3434343434343434, + "acc,exam_id__2012-06": 0.5, + "acc,exam_id__2012-08": 0.3625, + "acc,exam_id__2017-22": 0.5375, + "acc,exam_id__2012-07": 0.4125, + "acc,exam_id__2012-09": 0.35064935064935066, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.652940879778074, + "acc,all": 0.6674500587544065 + }, + "tweetsentbr": { + "f1_macro,all": 0.6890129426552263, + "acc,all": 0.6950248756218905, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 3, - "non_truncated": 14147, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 3, - "has_chat_template": true, - "chat_type": "user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "3c5ea0dafa4d1019739691987b88f38dae28eba1", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 64, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1451.7455065359477, - "min_seq_length": 1428, - "max_seq_length": 1518, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1675.7455065359477, - "min_seq_length": 1652, - "max_seq_length": 1742, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 1, - "non_truncated": 718, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 1, - "mean_seq_length": 1744.9262865090404, - "min_seq_length": 1368, - "max_seq_length": 2545, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998609179415855 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1645.039188243527, - "min_seq_length": 1379, - "max_seq_length": 2643, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1691.9876923076922, - "min_seq_length": 1636, - "max_seq_length": 1812, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 3, + "non_truncated": 14147, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 3, + "has_chat_template": true, + "chat_type": "user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "3c5ea0dafa4d1019739691987b88f38dae28eba1", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 64, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1462.3878571428572, - "min_seq_length": 1439, - "max_seq_length": 1713, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1390.764464692483, - "min_seq_length": 1124, - "max_seq_length": 1893, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1451.7455065359477, + "min_seq_length": 1428, + "max_seq_length": 1518, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1675.7455065359477, + "min_seq_length": 1652, + "max_seq_length": 1742, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 1, + "non_truncated": 718, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 1, + "mean_seq_length": 1744.9262865090404, + "min_seq_length": 1368, + "max_seq_length": 2545, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998609179415855 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1645.039188243527, + "min_seq_length": 1379, + "max_seq_length": 2643, 
+ "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1691.9876923076922, + "min_seq_length": 1636, + "max_seq_length": 1812, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1462.3878571428572, + "min_seq_length": 1439, + "max_seq_length": 1713, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1390.764464692483, + "min_seq_length": 1124, + "max_seq_length": 1893, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1963.3360752056403, + "min_seq_length": 1928, + "max_seq_length": 2002, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1881.2492537313433, + "min_seq_length": 1860, + "max_seq_length": 1976, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1963.3360752056403, - "min_seq_length": 1928, - "max_seq_length": 2002, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=JJhooww/Mistral_Relora_Step2k,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1881.2492537313433, - "min_seq_length": 1860, - "max_seq_length": 1976, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=JJhooww/Mistral_Relora_Step2k,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - 
"bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "0e4d6ae" + "git_hash": "0e4d6ae" } \ No newline at end of file diff --git a/JJhooww/Mistral_Relora_Step2k/results_2024-03-09T08-42-21.029909.json b/JJhooww/Mistral_Relora_Step2k/results_2024-03-09T08-42-21.029909.json index 4d4cea1271fb6318cf1a6688b46eeefec62d1239..b217305d735dd2f434237a9ccbdafea1639ae493 100644 --- a/JJhooww/Mistral_Relora_Step2k/results_2024-03-09T08-42-21.029909.json +++ b/JJhooww/Mistral_Relora_Step2k/results_2024-03-09T08-42-21.029909.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6441765668293375, - "all_grouped_npm": 0.4715334429973541, + "all_grouped_average": 0.6634120690694257, + "all_grouped_npm": 0.5001577022831998, "all_grouped": { "enem_challenge": 0.615815255423373, "bluex": 0.5257301808066759, @@ -45,7 +45,7 @@ "faquad_nli": 0.6526577185427341, "hatebr_offensive": 0.8133973664850924, "portuguese_hate_speech": 0.6536416538696902, - "tweetsentbr": 0.5193585604823832 + "tweetsentbr": 0.6924780806431777 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.615815255423373, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.6526577185427341, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8133973664850924, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6536416538696902, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5193585604823832 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6924780806431777 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.615815255423373, @@ -150,9 +150,9 @@ "main_score": 0.6536416538696902 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5193585604823832, + "f1_macro,all": 0.6924780806431777, "acc,all": 0.7074626865671642, - "main_score": 0.5193585604823832 + "main_score": 0.6924780806431777 } }, "config_tasks": { diff --git a/JJhooww/Mistral_Relora_Step2k/results_2024-04-20T03-09-26.234801.json b/JJhooww/Mistral_Relora_Step2k/results_2024-04-20T03-09-26.234801.json index ad6cfa3bd992bbb5ee5501485f2d231d4d8ff7d0..2d3e782ac59a20fbcfd761eedcf1901416e6ef35 100644 --- a/JJhooww/Mistral_Relora_Step2k/results_2024-04-20T03-09-26.234801.json +++ b/JJhooww/Mistral_Relora_Step2k/results_2024-04-20T03-09-26.234801.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6421865592536856, - "all_grouped_npm": 0.46863982903930285, + "all_grouped_average": 0.6613258076607753, + "all_grouped_npm": 0.4971208534546147, "all_grouped": { "enem_challenge": 0.6179146256123164, "bluex": 0.5159944367176634, @@ -45,7 +45,7 @@ "faquad_nli": 0.6466313961043266, "hatebr_offensive": 0.8143254279726638, "portuguese_hate_speech": 0.652940879778074, - "tweetsentbr": 0.5167597069914197 + "tweetsentbr": 0.6890129426552263 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6179146256123164, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.6466313961043266, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8143254279726638, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.652940879778074, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5167597069914197 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6890129426552263 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6179146256123164, @@ -150,9 +150,9 @@ "main_score": 0.652940879778074 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5167597069914197, + "f1_macro,all": 0.6890129426552263, "acc,all": 0.6950248756218905, - 
"main_score": 0.5167597069914197 + "main_score": 0.6890129426552263 } }, "config_tasks": { diff --git a/Kquant03/CognitiveFusion2-4x7B-BF16/raw_2024-05-19T01-32-18.922295/results.json b/Kquant03/CognitiveFusion2-4x7B-BF16/raw_2024-05-19T01-32-18.922295/results.json index e4a50ad62bafebaad838c527fd23d22c4aeaba92..ccb6118fdf68dc71decd2d829bc22ab0c709bd7d 100644 --- a/Kquant03/CognitiveFusion2-4x7B-BF16/raw_2024-05-19T01-32-18.922295/results.json +++ b/Kquant03/CognitiveFusion2-4x7B-BF16/raw_2024-05-19T01-32-18.922295/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9199303114955995, - "acc,all": 0.9199346405228758, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7779218286784337, - "mse,all": 0.4303513071895425, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5438108484005564, - "acc,exam_id__UNICAMP_2018": 0.5185185185185185, - "acc,exam_id__USP_2023": 0.5909090909090909, - "acc,exam_id__USP_2022": 0.4897959183673469, - "acc,exam_id__USP_2019": 0.425, - "acc,exam_id__UNICAMP_2019": 0.56, - "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, - "acc,exam_id__UNICAMP_2023": 0.6046511627906976, - "acc,exam_id__USP_2018": 0.48148148148148145, - "acc,exam_id__UNICAMP_2022": 0.5897435897435898, - "acc,exam_id__USP_2020": 0.5178571428571429, - "acc,exam_id__USP_2024": 0.7317073170731707, - "acc,exam_id__UNICAMP_2024": 0.5111111111111111, - "acc,exam_id__UNICAMP_2020": 0.6, - "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373, - "acc,exam_id__USP_2021": 0.4807692307692308, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6389083275017495, - "acc,exam_id__2009": 0.6521739130434783, - "acc,exam_id__2016_2": 0.6016260162601627, - "acc,exam_id__2013": 0.6944444444444444, - "acc,exam_id__2016": 0.5867768595041323, - "acc,exam_id__2015": 0.6218487394957983, - "acc,exam_id__2011": 0.6666666666666666, - "acc,exam_id__2014": 0.6146788990825688, - "acc,exam_id__2017": 0.6637931034482759, - "acc,exam_id__2010": 0.6837606837606838, - "acc,exam_id__2022": 0.6165413533834586, - "acc,exam_id__2023": 0.6444444444444445, - "acc,exam_id__2012": 0.6293103448275862 - }, - "faquad_nli": { - "f1_macro,all": 0.7730900759529709, - "acc,all": 0.8415384615384616, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8170741905271608, - "acc,all": 0.8214285714285714 - }, - "oab_exams": { - "acc,all": 0.4145785876993166, - "acc,exam_id__2012-08": 0.4125, - "acc,exam_id__2015-18": 0.4125, - "acc,exam_id__2013-10": 0.4125, - "acc,exam_id__2013-12": 0.4625, - "acc,exam_id__2011-03": 0.3333333333333333, - "acc,exam_id__2012-09": 0.33766233766233766, - "acc,exam_id__2012-07": 0.3625, - "acc,exam_id__2016-21": 0.375, - "acc,exam_id__2012-06": 0.5, - "acc,exam_id__2013-11": 0.45, - "acc,exam_id__2016-19": 0.5, - "acc,exam_id__2012-06a": 0.3625, - "acc,exam_id__2014-14": 0.5125, - "acc,exam_id__2017-22": 0.5125, - "acc,exam_id__2018-25": 0.45, - "acc,exam_id__2014-13": 0.35, - "acc,exam_id__2017-23": 0.425, - "acc,exam_id__2017-24": 0.3875, - "acc,exam_id__2011-04": 0.4, - "acc,exam_id__2010-02": 0.42, - "acc,exam_id__2015-17": 0.5, - "acc,exam_id__2011-05": 0.45, - "acc,exam_id__2014-15": 0.44871794871794873, - "acc,exam_id__2016-20": 0.3625, - "acc,exam_id__2010-01": 0.36470588235294116, - "acc,exam_id__2016-20a": 0.3375, - "acc,exam_id__2015-16": 0.375, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 
0.7076245180055771, - "acc,all": 0.7579318448883666 - }, - "tweetsentbr": { - "f1_macro,all": 0.49451740077068485, - "acc,all": 0.7069651741293532, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9199303114955995, + "acc,all": 0.9199346405228758, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7779218286784337, + "mse,all": 0.4303513071895425, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5438108484005564, + "acc,exam_id__UNICAMP_2018": 0.5185185185185185, + "acc,exam_id__USP_2023": 0.5909090909090909, + "acc,exam_id__USP_2022": 0.4897959183673469, + "acc,exam_id__USP_2019": 0.425, + "acc,exam_id__UNICAMP_2019": 0.56, + "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, + "acc,exam_id__UNICAMP_2023": 0.6046511627906976, + "acc,exam_id__USP_2018": 0.48148148148148145, + "acc,exam_id__UNICAMP_2022": 0.5897435897435898, + "acc,exam_id__USP_2020": 0.5178571428571429, + "acc,exam_id__USP_2024": 0.7317073170731707, + "acc,exam_id__UNICAMP_2024": 0.5111111111111111, + "acc,exam_id__UNICAMP_2020": 0.6, + "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373, + "acc,exam_id__USP_2021": 0.4807692307692308, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6389083275017495, + "acc,exam_id__2009": 0.6521739130434783, + "acc,exam_id__2016_2": 0.6016260162601627, + "acc,exam_id__2013": 0.6944444444444444, + "acc,exam_id__2016": 0.5867768595041323, + "acc,exam_id__2015": 0.6218487394957983, + "acc,exam_id__2011": 0.6666666666666666, + "acc,exam_id__2014": 0.6146788990825688, + "acc,exam_id__2017": 0.6637931034482759, + "acc,exam_id__2010": 0.6837606837606838, + "acc,exam_id__2022": 0.6165413533834586, + "acc,exam_id__2023": 0.6444444444444445, + "acc,exam_id__2012": 0.6293103448275862 + }, + "faquad_nli": { + "f1_macro,all": 0.7730900759529709, + "acc,all": 0.8415384615384616, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8170741905271608, + "acc,all": 0.8214285714285714 + }, + "oab_exams": { + "acc,all": 0.4145785876993166, + "acc,exam_id__2012-08": 0.4125, + "acc,exam_id__2015-18": 0.4125, + "acc,exam_id__2013-10": 0.4125, + "acc,exam_id__2013-12": 0.4625, + 
"acc,exam_id__2011-03": 0.3333333333333333, + "acc,exam_id__2012-09": 0.33766233766233766, + "acc,exam_id__2012-07": 0.3625, + "acc,exam_id__2016-21": 0.375, + "acc,exam_id__2012-06": 0.5, + "acc,exam_id__2013-11": 0.45, + "acc,exam_id__2016-19": 0.5, + "acc,exam_id__2012-06a": 0.3625, + "acc,exam_id__2014-14": 0.5125, + "acc,exam_id__2017-22": 0.5125, + "acc,exam_id__2018-25": 0.45, + "acc,exam_id__2014-13": 0.35, + "acc,exam_id__2017-23": 0.425, + "acc,exam_id__2017-24": 0.3875, + "acc,exam_id__2011-04": 0.4, + "acc,exam_id__2010-02": 0.42, + "acc,exam_id__2015-17": 0.5, + "acc,exam_id__2011-05": 0.45, + "acc,exam_id__2014-15": 0.44871794871794873, + "acc,exam_id__2016-20": 0.3625, + "acc,exam_id__2010-01": 0.36470588235294116, + "acc,exam_id__2016-20a": 0.3375, + "acc,exam_id__2015-16": 0.375, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7076245180055771, + "acc,all": 0.7579318448883666 + }, + "tweetsentbr": { + "f1_macro,all": 0.6593565343609131, + "acc,all": 0.7069651741293532, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "db45b86c462bb93db7ba4f2c3fe3517582c859a1", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 48844259328, - "model_num_parameters": 24153690112, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 
1620.039188243527, - "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "db45b86c462bb93db7ba4f2c3fe3517582c859a1", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 48844259328, + "model_num_parameters": 24153690112, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Kquant03/CognitiveFusion2-4x7B-BF16,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Kquant03/CognitiveFusion2-4x7B-BF16,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - 
"gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/Kquant03/CognitiveFusion2-4x7B-BF16/results_2024-05-19T01-32-18.922295.json b/Kquant03/CognitiveFusion2-4x7B-BF16/results_2024-05-19T01-32-18.922295.json index b32b512d5b2a005d08dc32326ca6e52fe685eed8..7439392bf9b81ad843e6cb2bbb8516a54ad8b6d3 100644 --- a/Kquant03/CognitiveFusion2-4x7B-BF16/results_2024-05-19T01-32-18.922295.json +++ b/Kquant03/CognitiveFusion2-4x7B-BF16/results_2024-05-19T01-32-18.922295.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.67638400989245, - "all_grouped_npm": 0.5223190853843742, + "all_grouped_average": 0.6946994691802532, + "all_grouped_npm": 0.5495742331340812, "all_grouped": { "enem_challenge": 0.6389083275017495, "bluex": 0.5438108484005564, @@ -45,7 +45,7 @@ "faquad_nli": 0.7730900759529709, "hatebr_offensive": 0.8170741905271608, "portuguese_hate_speech": 0.7076245180055771, - "tweetsentbr": 0.49451740077068485 + "tweetsentbr": 0.6593565343609131 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6389083275017495, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7730900759529709, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8170741905271608, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7076245180055771, - "harness|tweetsentbr|tweetsentbr|None|25": 0.49451740077068485 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6593565343609131 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6389083275017495, @@ -150,9 +150,9 @@ "main_score": 0.7076245180055771 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.49451740077068485, + "f1_macro,all": 0.6593565343609131, "acc,all": 0.7069651741293532, - "main_score": 0.49451740077068485 + "main_score": 0.6593565343609131 } }, "config_tasks": { diff --git a/Kukedlc/NeuralExperiment-7b-MagicCoder-v7.5/raw_2024-07-28T01-32-22.165106/results.json b/Kukedlc/NeuralExperiment-7b-MagicCoder-v7.5/raw_2024-07-28T01-32-22.165106/results.json index c67b0dee9053a49034ac0f34d804c9c81a06c518..b4fb3ecbdcde0e79518e6ffa646cc5cf2d3adf36 100644 --- a/Kukedlc/NeuralExperiment-7b-MagicCoder-v7.5/raw_2024-07-28T01-32-22.165106/results.json +++ b/Kukedlc/NeuralExperiment-7b-MagicCoder-v7.5/raw_2024-07-28T01-32-22.165106/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9207426934587088, - "acc,all": 0.920751633986928, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7806469957644414, - "mse,all": 0.43004901960784325, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5271210013908206, - "acc,exam_id__USP_2019": 0.45, - "acc,exam_id__USP_2018": 0.3888888888888889, - "acc,exam_id__USP_2020": 0.5714285714285714, - "acc,exam_id__USP_2022": 0.4897959183673469, - "acc,exam_id__USP_2023": 0.5909090909090909, - "acc,exam_id__UNICAMP_2019": 0.54, - "acc,exam_id__UNICAMP_2020": 0.5454545454545454, - "acc,exam_id__UNICAMP_2023": 0.5581395348837209, - "acc,exam_id__UNICAMP_2021_1": 0.5, - "acc,exam_id__UNICAMP_2022": 0.6153846153846154, - "acc,exam_id__UNICAMP_2024": 0.5777777777777777, - "acc,exam_id__UNICAMP_2018": 0.4444444444444444, - "acc,exam_id__USP_2024": 0.7560975609756098, - "acc,exam_id__USP_2021": 0.4807692307692308, - "acc,exam_id__UNICAMP_2021_2": 0.47058823529411764, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6431070678796361, - "acc,exam_id__2016": 0.5702479338842975, - "acc,exam_id__2013": 
0.6666666666666666, - "acc,exam_id__2012": 0.6379310344827587, - "acc,exam_id__2023": 0.6814814814814815, - "acc,exam_id__2016_2": 0.6504065040650406, - "acc,exam_id__2010": 0.7094017094017094, - "acc,exam_id__2009": 0.6173913043478261, - "acc,exam_id__2011": 0.7008547008547008, - "acc,exam_id__2015": 0.5966386554621849, - "acc,exam_id__2014": 0.5871559633027523, - "acc,exam_id__2017": 0.6810344827586207, - "acc,exam_id__2022": 0.6165413533834586 - }, - "faquad_nli": { - "f1_macro,all": 0.7641446815289443, - "acc,all": 0.8107692307692308, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8412698412698412, - "acc,all": 0.8428571428571429 - }, - "oab_exams": { - "acc,all": 0.41594533029612757, - "acc,exam_id__2017-24": 0.3375, - "acc,exam_id__2011-05": 0.45, - "acc,exam_id__2015-16": 0.35, - "acc,exam_id__2016-20a": 0.3625, - "acc,exam_id__2011-03": 0.32323232323232326, - "acc,exam_id__2012-09": 0.38961038961038963, - "acc,exam_id__2012-06": 0.5, - "acc,exam_id__2012-06a": 0.375, - "acc,exam_id__2013-11": 0.45, - "acc,exam_id__2013-10": 0.4, - "acc,exam_id__2018-25": 0.4875, - "acc,exam_id__2014-13": 0.3125, - "acc,exam_id__2014-14": 0.5125, - "acc,exam_id__2012-07": 0.3625, - "acc,exam_id__2017-23": 0.4625, - "acc,exam_id__2016-20": 0.3625, - "acc,exam_id__2016-21": 0.3625, - "acc,exam_id__2011-04": 0.4125, - "acc,exam_id__2015-18": 0.4375, - "acc,exam_id__2016-19": 0.48717948717948717, - "acc,exam_id__2014-15": 0.46153846153846156, - "acc,exam_id__2017-22": 0.55, - "acc,exam_id__2012-08": 0.425, - "acc,exam_id__2015-17": 0.5, - "acc,exam_id__2010-01": 0.35294117647058826, - "acc,exam_id__2013-12": 0.4, - "acc,exam_id__2010-02": 0.43, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6731605919879914, - "acc,all": 0.6921269095182139 - }, - "tweetsentbr": { - "f1_macro,all": 0.49723454010215834, - "acc,all": 0.7059701492537314, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9207426934587088, + "acc,all": 0.920751633986928, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7806469957644414, + "mse,all": 0.43004901960784325, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5271210013908206, + "acc,exam_id__USP_2019": 0.45, + "acc,exam_id__USP_2018": 0.3888888888888889, + "acc,exam_id__USP_2020": 0.5714285714285714, + "acc,exam_id__USP_2022": 0.4897959183673469, + "acc,exam_id__USP_2023": 0.5909090909090909, + "acc,exam_id__UNICAMP_2019": 0.54, + "acc,exam_id__UNICAMP_2020": 0.5454545454545454, + "acc,exam_id__UNICAMP_2023": 0.5581395348837209, + "acc,exam_id__UNICAMP_2021_1": 0.5, + "acc,exam_id__UNICAMP_2022": 0.6153846153846154, + "acc,exam_id__UNICAMP_2024": 0.5777777777777777, + "acc,exam_id__UNICAMP_2018": 0.4444444444444444, + "acc,exam_id__USP_2024": 0.7560975609756098, + "acc,exam_id__USP_2021": 0.4807692307692308, + "acc,exam_id__UNICAMP_2021_2": 0.47058823529411764, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6431070678796361, + "acc,exam_id__2016": 0.5702479338842975, + "acc,exam_id__2013": 0.6666666666666666, + "acc,exam_id__2012": 0.6379310344827587, + "acc,exam_id__2023": 0.6814814814814815, + "acc,exam_id__2016_2": 0.6504065040650406, + "acc,exam_id__2010": 0.7094017094017094, + "acc,exam_id__2009": 0.6173913043478261, + "acc,exam_id__2011": 0.7008547008547008, + "acc,exam_id__2015": 0.5966386554621849, + "acc,exam_id__2014": 0.5871559633027523, + "acc,exam_id__2017": 0.6810344827586207, + "acc,exam_id__2022": 0.6165413533834586 + }, + "faquad_nli": { + "f1_macro,all": 0.7641446815289443, + "acc,all": 0.8107692307692308, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8412698412698412, + "acc,all": 0.8428571428571429 + }, + "oab_exams": { + "acc,all": 0.41594533029612757, + "acc,exam_id__2017-24": 0.3375, + "acc,exam_id__2011-05": 0.45, + "acc,exam_id__2015-16": 0.35, + "acc,exam_id__2016-20a": 0.3625, + "acc,exam_id__2011-03": 0.32323232323232326, + "acc,exam_id__2012-09": 0.38961038961038963, + "acc,exam_id__2012-06": 0.5, + "acc,exam_id__2012-06a": 0.375, + "acc,exam_id__2013-11": 0.45, + "acc,exam_id__2013-10": 0.4, + "acc,exam_id__2018-25": 0.4875, + "acc,exam_id__2014-13": 0.3125, + "acc,exam_id__2014-14": 0.5125, + "acc,exam_id__2012-07": 0.3625, + "acc,exam_id__2017-23": 0.4625, + "acc,exam_id__2016-20": 0.3625, + "acc,exam_id__2016-21": 0.3625, + "acc,exam_id__2011-04": 0.4125, + "acc,exam_id__2015-18": 0.4375, + "acc,exam_id__2016-19": 0.48717948717948717, + "acc,exam_id__2014-15": 0.46153846153846156, + "acc,exam_id__2017-22": 
0.55, + "acc,exam_id__2012-08": 0.425, + "acc,exam_id__2015-17": 0.5, + "acc,exam_id__2010-01": 0.35294117647058826, + "acc,exam_id__2013-12": 0.4, + "acc,exam_id__2010-02": 0.43, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6731605919879914, + "acc,all": 0.6921269095182139 + }, + "tweetsentbr": { + "f1_macro,all": 0.6629793868028777, + "acc,all": 0.7059701492537314, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "43ea8d27d652dc15e4d27f665c5d636a5937780b", - "model_dtype": "torch.float16", - "model_memory_footprint": 14483472384, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 1620.039188243527, 
- "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "43ea8d27d652dc15e4d27f665c5d636a5937780b", + "model_dtype": "torch.float16", + "model_memory_footprint": 14483472384, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Kukedlc/NeuralExperiment-7b-MagicCoder-v7.5,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Kukedlc/NeuralExperiment-7b-MagicCoder-v7.5,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": 
null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/Kukedlc/NeuralExperiment-7b-MagicCoder-v7.5/results_2024-07-28T01-32-22.165106.json b/Kukedlc/NeuralExperiment-7b-MagicCoder-v7.5/results_2024-07-28T01-32-22.165106.json index c3808a04636eb8e1356655d0b115ed1150a87d76..2732964daf48921da99e3705552424e52ee1f768 100644 --- a/Kukedlc/NeuralExperiment-7b-MagicCoder-v7.5/results_2024-07-28T01-32-22.165106.json +++ b/Kukedlc/NeuralExperiment-7b-MagicCoder-v7.5/results_2024-07-28T01-32-22.165106.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6737080826309633, - "all_grouped_npm": 0.5178442795207305, + "all_grouped_average": 0.692124176708821, + "all_grouped_npm": 0.5452491814223044, "all_grouped": { "enem_challenge": 0.6431070678796361, "bluex": 0.5271210013908206, @@ -45,7 +45,7 @@ "faquad_nli": 0.7641446815289443, "hatebr_offensive": 0.8412698412698412, "portuguese_hate_speech": 0.6731605919879914, - "tweetsentbr": 0.49723454010215834 + "tweetsentbr": 0.6629793868028777 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6431070678796361, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7641446815289443, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8412698412698412, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6731605919879914, - "harness|tweetsentbr|tweetsentbr|None|25": 0.49723454010215834 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6629793868028777 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6431070678796361, @@ -150,9 +150,9 @@ "main_score": 0.6731605919879914 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.49723454010215834, + "f1_macro,all": 0.6629793868028777, "acc,all": 0.7059701492537314, - "main_score": 0.49723454010215834 + "main_score": 0.6629793868028777 } }, "config_tasks": { diff --git a/Kukedlc/NeuralSynthesis-7B-v0.1/raw_2024-06-15T14-00-57.278364/results.json b/Kukedlc/NeuralSynthesis-7B-v0.1/raw_2024-06-15T14-00-57.278364/results.json index b66ccf29d5f92f4195611dffb43020ad423e5f9d..458ee3ba81cbe60eaac52669d3446c0568ff1f3e 100644 --- a/Kukedlc/NeuralSynthesis-7B-v0.1/raw_2024-06-15T14-00-57.278364/results.json +++ b/Kukedlc/NeuralSynthesis-7B-v0.1/raw_2024-06-15T14-00-57.278364/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.924014535978148, - "acc,all": 0.9240196078431373, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7798626544905576, - "mse,all": 0.42701388888888886, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5479833101529903, - "acc,exam_id__USP_2020": 0.5178571428571429, - "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, - "acc,exam_id__USP_2019": 0.45, - "acc,exam_id__UNICAMP_2024": 0.5333333333333333, - "acc,exam_id__UNICAMP_2019": 0.54, - "acc,exam_id__USP_2021": 0.4807692307692308, - "acc,exam_id__UNICAMP_2020": 0.6, - "acc,exam_id__UNICAMP_2018": 0.5370370370370371, - "acc,exam_id__UNICAMP_2023": 0.6046511627906976, - "acc,exam_id__USP_2022": 0.4897959183673469, - "acc,exam_id__USP_2023": 0.6136363636363636, - "acc,exam_id__USP_2024": 0.7560975609756098, - "acc,exam_id__USP_2018": 0.48148148148148145, - "acc,exam_id__UNICAMP_2022": 0.5897435897435898, - "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6347095871238628, - "acc,exam_id__2009": 0.6521739130434783, - "acc,exam_id__2017": 0.6724137931034483, - 
"acc,exam_id__2011": 0.6581196581196581, - "acc,exam_id__2015": 0.6134453781512605, - "acc,exam_id__2010": 0.6837606837606838, - "acc,exam_id__2022": 0.6165413533834586, - "acc,exam_id__2013": 0.6944444444444444, - "acc,exam_id__2012": 0.6206896551724138, - "acc,exam_id__2014": 0.6146788990825688, - "acc,exam_id__2016": 0.5785123966942148, - "acc,exam_id__2016_2": 0.5934959349593496, - "acc,exam_id__2023": 0.6296296296296297 - }, - "faquad_nli": { - "f1_macro,all": 0.7806541284802154, - "acc,all": 0.8446153846153847, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8201619169361105, - "acc,all": 0.8242857142857143 - }, - "oab_exams": { - "acc,all": 0.414123006833713, - "acc,exam_id__2012-06": 0.5, - "acc,exam_id__2012-07": 0.3625, - "acc,exam_id__2010-01": 0.3764705882352941, - "acc,exam_id__2017-22": 0.55, - "acc,exam_id__2013-12": 0.45, - "acc,exam_id__2015-18": 0.4125, - "acc,exam_id__2014-15": 0.46153846153846156, - "acc,exam_id__2016-19": 0.48717948717948717, - "acc,exam_id__2018-25": 0.45, - "acc,exam_id__2013-11": 0.45, - "acc,exam_id__2015-17": 0.5, - "acc,exam_id__2010-02": 0.42, - "acc,exam_id__2012-09": 0.35064935064935066, - "acc,exam_id__2013-10": 0.375, - "acc,exam_id__2011-04": 0.4, - "acc,exam_id__2012-08": 0.425, - "acc,exam_id__2016-20a": 0.3375, - "acc,exam_id__2015-16": 0.375, - "acc,exam_id__2014-13": 0.325, - "acc,exam_id__2016-21": 0.375, - "acc,exam_id__2017-24": 0.3625, - "acc,exam_id__2014-14": 0.5125, - "acc,exam_id__2012-06a": 0.35, - "acc,exam_id__2016-20": 0.3625, - "acc,exam_id__2017-23": 0.425, - "acc,exam_id__2011-05": 0.45, - "acc,exam_id__2011-03": 0.35353535353535354, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.705477537437604, - "acc,all": 0.7555816686251469 - }, - "tweetsentbr": { - "f1_macro,all": 0.4910589380525191, - "acc,all": 0.7029850746268657, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.924014535978148, + "acc,all": 0.9240196078431373, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7798626544905576, + "mse,all": 0.42701388888888886, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5479833101529903, + "acc,exam_id__USP_2020": 0.5178571428571429, + "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, + "acc,exam_id__USP_2019": 0.45, + "acc,exam_id__UNICAMP_2024": 0.5333333333333333, + "acc,exam_id__UNICAMP_2019": 0.54, + "acc,exam_id__USP_2021": 0.4807692307692308, + "acc,exam_id__UNICAMP_2020": 0.6, + "acc,exam_id__UNICAMP_2018": 0.5370370370370371, + "acc,exam_id__UNICAMP_2023": 0.6046511627906976, + "acc,exam_id__USP_2022": 0.4897959183673469, + "acc,exam_id__USP_2023": 0.6136363636363636, + "acc,exam_id__USP_2024": 0.7560975609756098, + "acc,exam_id__USP_2018": 0.48148148148148145, + "acc,exam_id__UNICAMP_2022": 0.5897435897435898, + "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6347095871238628, + "acc,exam_id__2009": 0.6521739130434783, + "acc,exam_id__2017": 0.6724137931034483, + "acc,exam_id__2011": 0.6581196581196581, + "acc,exam_id__2015": 0.6134453781512605, + "acc,exam_id__2010": 0.6837606837606838, + "acc,exam_id__2022": 0.6165413533834586, + "acc,exam_id__2013": 0.6944444444444444, + "acc,exam_id__2012": 0.6206896551724138, + "acc,exam_id__2014": 0.6146788990825688, + "acc,exam_id__2016": 0.5785123966942148, + "acc,exam_id__2016_2": 0.5934959349593496, + "acc,exam_id__2023": 0.6296296296296297 + }, + "faquad_nli": { + "f1_macro,all": 0.7806541284802154, + "acc,all": 0.8446153846153847, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8201619169361105, + "acc,all": 0.8242857142857143 + }, + "oab_exams": { + "acc,all": 0.414123006833713, + "acc,exam_id__2012-06": 0.5, + "acc,exam_id__2012-07": 0.3625, + "acc,exam_id__2010-01": 0.3764705882352941, + "acc,exam_id__2017-22": 0.55, + "acc,exam_id__2013-12": 0.45, + "acc,exam_id__2015-18": 0.4125, + "acc,exam_id__2014-15": 0.46153846153846156, + "acc,exam_id__2016-19": 0.48717948717948717, + "acc,exam_id__2018-25": 0.45, + "acc,exam_id__2013-11": 0.45, + "acc,exam_id__2015-17": 0.5, + "acc,exam_id__2010-02": 0.42, + "acc,exam_id__2012-09": 0.35064935064935066, + "acc,exam_id__2013-10": 0.375, + "acc,exam_id__2011-04": 0.4, + "acc,exam_id__2012-08": 0.425, + "acc,exam_id__2016-20a": 0.3375, + "acc,exam_id__2015-16": 0.375, + "acc,exam_id__2014-13": 0.325, + "acc,exam_id__2016-21": 0.375, + "acc,exam_id__2017-24": 0.3625, + "acc,exam_id__2014-14": 0.5125, + 
"acc,exam_id__2012-06a": 0.35, + "acc,exam_id__2016-20": 0.3625, + "acc,exam_id__2017-23": 0.425, + "acc,exam_id__2011-05": 0.45, + "acc,exam_id__2011-03": 0.35353535353535354, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.705477537437604, + "acc,all": 0.7555816686251469 + }, + "tweetsentbr": { + "f1_macro,all": 0.6547452507366921, + "acc,all": 0.7029850746268657, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "547a5dc8963e127a9638256bb80eb3a36da1cc5d", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 1620.039188243527, 
- "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "547a5dc8963e127a9638256bb80eb3a36da1cc5d", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Kukedlc/NeuralSynthesis-7B-v0.1,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Kukedlc/NeuralSynthesis-7B-v0.1,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": 
"2d67fba" + "git_hash": "2d67fba" } \ No newline at end of file diff --git a/Kukedlc/NeuralSynthesis-7B-v0.1/results_2024-06-15T14-00-57.278364.json b/Kukedlc/NeuralSynthesis-7B-v0.1/results_2024-06-15T14-00-57.278364.json index 33224c20c122f3ee1e6f67aeebbaa6e90dd2d9f4..26ffbd434ab9c5431372e5cd80f918e26f49e000 100644 --- a/Kukedlc/NeuralSynthesis-7B-v0.1/results_2024-06-15T14-00-57.278364.json +++ b/Kukedlc/NeuralSynthesis-7B-v0.1/results_2024-06-15T14-00-57.278364.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6775606239428578, - "all_grouped_npm": 0.5245912837173908, + "all_grouped_average": 0.695747992018877, + "all_grouped_npm": 0.5516558195448004, "all_grouped": { "enem_challenge": 0.6347095871238628, "bluex": 0.5479833101529903, @@ -45,7 +45,7 @@ "faquad_nli": 0.7806541284802154, "hatebr_offensive": 0.8201619169361105, "portuguese_hate_speech": 0.705477537437604, - "tweetsentbr": 0.4910589380525191 + "tweetsentbr": 0.6547452507366921 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6347095871238628, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7806541284802154, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8201619169361105, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.705477537437604, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4910589380525191 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6547452507366921 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6347095871238628, @@ -150,9 +150,9 @@ "main_score": 0.705477537437604 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4910589380525191, + "f1_macro,all": 0.6547452507366921, "acc,all": 0.7029850746268657, - "main_score": 0.4910589380525191 + "main_score": 0.6547452507366921 } }, "config_tasks": { diff --git a/Kukedlc/NeuralSynthesis-7B-v0.3/raw_2024-08-07T06-51-04.901695/results.json b/Kukedlc/NeuralSynthesis-7B-v0.3/raw_2024-08-07T06-51-04.901695/results.json index 28cd545b2a6f3c905d0d951a13929ceadde3850c..9b834b312390c70156c6c0ed9999947573bb570f 100644 --- a/Kukedlc/NeuralSynthesis-7B-v0.3/raw_2024-08-07T06-51-04.901695/results.json +++ b/Kukedlc/NeuralSynthesis-7B-v0.3/raw_2024-08-07T06-51-04.901695/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.922380439977678, - "acc,all": 0.9223856209150327, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7790400956364452, - "mse,all": 0.4273447712418301, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5424200278164116, - "acc,exam_id__USP_2021": 0.46153846153846156, - "acc,exam_id__USP_2022": 0.5102040816326531, - "acc,exam_id__USP_2024": 0.7317073170731707, - "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, - "acc,exam_id__USP_2018": 0.48148148148148145, - "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, - "acc,exam_id__UNICAMP_2022": 0.5897435897435898, - "acc,exam_id__USP_2019": 0.425, - "acc,exam_id__UNICAMP_2023": 0.6046511627906976, - "acc,exam_id__UNICAMP_2019": 0.52, - "acc,exam_id__UNICAMP_2024": 0.5111111111111111, - "acc,exam_id__USP_2023": 0.5909090909090909, - "acc,exam_id__UNICAMP_2018": 0.5185185185185185, - "acc,exam_id__UNICAMP_2020": 0.6, - "acc,exam_id__USP_2020": 0.5178571428571429, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6375087473757872, - "acc,exam_id__2016": 0.5867768595041323, - "acc,exam_id__2015": 0.6218487394957983, - "acc,exam_id__2011": 0.6666666666666666, - "acc,exam_id__2009": 0.6521739130434783, - 
"acc,exam_id__2022": 0.6090225563909775, - "acc,exam_id__2023": 0.6370370370370371, - "acc,exam_id__2010": 0.6837606837606838, - "acc,exam_id__2014": 0.6146788990825688, - "acc,exam_id__2016_2": 0.6016260162601627, - "acc,exam_id__2012": 0.6293103448275862, - "acc,exam_id__2013": 0.6944444444444444, - "acc,exam_id__2017": 0.6637931034482759 - }, - "faquad_nli": { - "f1_macro,all": 0.7796173768257517, - "acc,all": 0.8446153846153847, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8186191231446596, - "acc,all": 0.8228571428571428 - }, - "oab_exams": { - "acc,all": 0.4145785876993166, - "acc,exam_id__2010-02": 0.42, - "acc,exam_id__2014-13": 0.3125, - "acc,exam_id__2015-16": 0.375, - "acc,exam_id__2010-01": 0.3764705882352941, - "acc,exam_id__2016-20a": 0.35, - "acc,exam_id__2017-24": 0.375, - "acc,exam_id__2012-07": 0.3625, - "acc,exam_id__2015-18": 0.4125, - "acc,exam_id__2014-15": 0.46153846153846156, - "acc,exam_id__2011-03": 0.3434343434343434, - "acc,exam_id__2018-25": 0.45, - "acc,exam_id__2016-19": 0.5128205128205128, - "acc,exam_id__2013-11": 0.45, - "acc,exam_id__2012-06a": 0.3625, - "acc,exam_id__2012-06": 0.5, - "acc,exam_id__2017-23": 0.45, - "acc,exam_id__2015-17": 0.48717948717948717, - "acc,exam_id__2016-21": 0.35, - "acc,exam_id__2013-10": 0.3875, - "acc,exam_id__2012-08": 0.4125, - "acc,exam_id__2017-22": 0.5375, - "acc,exam_id__2014-14": 0.525, - "acc,exam_id__2012-09": 0.33766233766233766, - "acc,exam_id__2013-12": 0.45, - "acc,exam_id__2016-20": 0.3625, - "acc,exam_id__2011-05": 0.45, - "acc,exam_id__2011-04": 0.4, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7040870914245975, - "acc,all": 0.7555816686251469 - }, - "tweetsentbr": { - "f1_macro,all": 0.4942320660148471, - "acc,all": 0.7064676616915423, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.922380439977678, + "acc,all": 0.9223856209150327, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7790400956364452, + "mse,all": 0.4273447712418301, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5424200278164116, + "acc,exam_id__USP_2021": 0.46153846153846156, + "acc,exam_id__USP_2022": 0.5102040816326531, + "acc,exam_id__USP_2024": 0.7317073170731707, + "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, + "acc,exam_id__USP_2018": 0.48148148148148145, + "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, + "acc,exam_id__UNICAMP_2022": 0.5897435897435898, + "acc,exam_id__USP_2019": 0.425, + "acc,exam_id__UNICAMP_2023": 0.6046511627906976, + "acc,exam_id__UNICAMP_2019": 0.52, + "acc,exam_id__UNICAMP_2024": 0.5111111111111111, + "acc,exam_id__USP_2023": 0.5909090909090909, + "acc,exam_id__UNICAMP_2018": 0.5185185185185185, + "acc,exam_id__UNICAMP_2020": 0.6, + "acc,exam_id__USP_2020": 0.5178571428571429, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6375087473757872, + "acc,exam_id__2016": 0.5867768595041323, + "acc,exam_id__2015": 0.6218487394957983, + "acc,exam_id__2011": 0.6666666666666666, + "acc,exam_id__2009": 0.6521739130434783, + "acc,exam_id__2022": 0.6090225563909775, + "acc,exam_id__2023": 0.6370370370370371, + "acc,exam_id__2010": 0.6837606837606838, + "acc,exam_id__2014": 0.6146788990825688, + "acc,exam_id__2016_2": 0.6016260162601627, + "acc,exam_id__2012": 0.6293103448275862, + "acc,exam_id__2013": 0.6944444444444444, + "acc,exam_id__2017": 0.6637931034482759 + }, + "faquad_nli": { + "f1_macro,all": 0.7796173768257517, + "acc,all": 0.8446153846153847, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8186191231446596, + "acc,all": 0.8228571428571428 + }, + "oab_exams": { + "acc,all": 0.4145785876993166, + "acc,exam_id__2010-02": 0.42, + "acc,exam_id__2014-13": 0.3125, + "acc,exam_id__2015-16": 0.375, + "acc,exam_id__2010-01": 0.3764705882352941, + "acc,exam_id__2016-20a": 0.35, + "acc,exam_id__2017-24": 0.375, + "acc,exam_id__2012-07": 0.3625, + "acc,exam_id__2015-18": 0.4125, + "acc,exam_id__2014-15": 0.46153846153846156, + "acc,exam_id__2011-03": 0.3434343434343434, + "acc,exam_id__2018-25": 0.45, + "acc,exam_id__2016-19": 0.5128205128205128, + "acc,exam_id__2013-11": 0.45, + "acc,exam_id__2012-06a": 0.3625, + "acc,exam_id__2012-06": 0.5, + "acc,exam_id__2017-23": 0.45, + "acc,exam_id__2015-17": 0.48717948717948717, + "acc,exam_id__2016-21": 0.35, + "acc,exam_id__2013-10": 0.3875, + "acc,exam_id__2012-08": 0.4125, + "acc,exam_id__2017-22": 0.5375, + 
"acc,exam_id__2014-14": 0.525, + "acc,exam_id__2012-09": 0.33766233766233766, + "acc,exam_id__2013-12": 0.45, + "acc,exam_id__2016-20": 0.3625, + "acc,exam_id__2011-05": 0.45, + "acc,exam_id__2011-04": 0.4, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7040870914245975, + "acc,all": 0.7555816686251469 + }, + "tweetsentbr": { + "f1_macro,all": 0.6589760880197961, + "acc,all": 0.7064676616915423, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "090fab29146f8e55066bce2f5f5859ab2d6027f4", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 14483472384, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 1620.039188243527, 
- "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "090fab29146f8e55066bce2f5f5859ab2d6027f4", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 14483472384, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Kukedlc/NeuralSynthesis-7B-v0.3,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Kukedlc/NeuralSynthesis-7B-v0.3,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": 
"5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/Kukedlc/NeuralSynthesis-7B-v0.3/results_2024-08-07T06-51-04.901695.json b/Kukedlc/NeuralSynthesis-7B-v0.3/results_2024-08-07T06-51-04.901695.json index 3b87041832fd127f0be23657eb6ae6785df6678e..c79f2808f214306e8941c8170b9240fae5c51a89 100644 --- a/Kukedlc/NeuralSynthesis-7B-v0.3/results_2024-08-07T06-51-04.901695.json +++ b/Kukedlc/NeuralSynthesis-7B-v0.3/results_2024-08-07T06-51-04.901695.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6769426173239439, - "all_grouped_npm": 0.5234689442740138, + "all_grouped_average": 0.695247508657827, + "all_grouped_npm": 0.5507083659018162, "all_grouped": { "enem_challenge": 0.6375087473757872, "bluex": 0.5424200278164116, @@ -45,7 +45,7 @@ "faquad_nli": 0.7796173768257517, "hatebr_offensive": 0.8186191231446596, "portuguese_hate_speech": 0.7040870914245975, - "tweetsentbr": 0.4942320660148471 + "tweetsentbr": 0.6589760880197961 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6375087473757872, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7796173768257517, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8186191231446596, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7040870914245975, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4942320660148471 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6589760880197961 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6375087473757872, @@ -150,9 +150,9 @@ "main_score": 0.7040870914245975 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4942320660148471, + "f1_macro,all": 0.6589760880197961, "acc,all": 0.7064676616915423, - "main_score": 0.4942320660148471 + "main_score": 0.6589760880197961 } }, "config_tasks": { diff --git a/Kukedlc/NeuralSynthesis-7b-v0.4-slerp/raw_2024-06-16T02-26-33.125166/results.json b/Kukedlc/NeuralSynthesis-7b-v0.4-slerp/raw_2024-06-16T02-26-33.125166/results.json index a50012e5d6b6405a5fa6c6c5de52e097bfd6174c..908c2373eeb3d052d47df56b14c7c8790ff69a0c 100644 --- a/Kukedlc/NeuralSynthesis-7b-v0.4-slerp/raw_2024-06-16T02-26-33.125166/results.json +++ b/Kukedlc/NeuralSynthesis-7b-v0.4-slerp/raw_2024-06-16T02-26-33.125166/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9227873017727286, - "acc,all": 0.9227941176470589, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7796159438413869, - "mse,all": 0.426936274509804, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5382475660639777, - "acc,exam_id__UNICAMP_2023": 0.6046511627906976, - "acc,exam_id__USP_2019": 0.425, - "acc,exam_id__USP_2018": 0.46296296296296297, - "acc,exam_id__USP_2020": 0.5178571428571429, - "acc,exam_id__UNICAMP_2019": 0.52, - "acc,exam_id__UNICAMP_2022": 0.5897435897435898, - "acc,exam_id__UNICAMP_2018": 0.5370370370370371, - "acc,exam_id__UNICAMP_2020": 0.5818181818181818, - "acc,exam_id__USP_2021": 0.4807692307692308, - "acc,exam_id__USP_2023": 0.5909090909090909, - "acc,exam_id__UNICAMP_2024": 0.5333333333333333, - "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, - "acc,exam_id__USP_2022": 0.4897959183673469, - "acc,exam_id__UNICAMP_2021_2": 0.49019607843137253, - "acc,exam_id__USP_2024": 0.7560975609756098, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6382085374387684, - "acc,exam_id__2017": 0.6724137931034483, - "acc,exam_id__2014": 0.6146788990825688, - "acc,exam_id__2010": 0.6837606837606838, - 
"acc,exam_id__2011": 0.6666666666666666, - "acc,exam_id__2015": 0.6218487394957983, - "acc,exam_id__2013": 0.6944444444444444, - "acc,exam_id__2023": 0.6370370370370371, - "acc,exam_id__2022": 0.6090225563909775, - "acc,exam_id__2016_2": 0.6097560975609756, - "acc,exam_id__2012": 0.6293103448275862, - "acc,exam_id__2016": 0.5785123966942148, - "acc,exam_id__2009": 0.6521739130434783 - }, - "faquad_nli": { - "f1_macro,all": 0.7812802842683321, - "acc,all": 0.8461538461538461, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8163838486760461, - "acc,all": 0.8207142857142857 - }, - "oab_exams": { - "acc,all": 0.4154897494305239, - "acc,exam_id__2015-17": 0.5, - "acc,exam_id__2015-18": 0.425, - "acc,exam_id__2014-14": 0.525, - "acc,exam_id__2012-09": 0.33766233766233766, - "acc,exam_id__2016-20a": 0.35, - "acc,exam_id__2012-06": 0.475, - "acc,exam_id__2017-23": 0.45, - "acc,exam_id__2010-01": 0.36470588235294116, - "acc,exam_id__2011-04": 0.4, - "acc,exam_id__2011-05": 0.45, - "acc,exam_id__2014-13": 0.325, - "acc,exam_id__2016-20": 0.3625, - "acc,exam_id__2013-10": 0.4, - "acc,exam_id__2016-19": 0.5, - "acc,exam_id__2016-21": 0.3625, - "acc,exam_id__2012-06a": 0.35, - "acc,exam_id__2012-07": 0.3625, - "acc,exam_id__2015-16": 0.375, - "acc,exam_id__2013-11": 0.45, - "acc,exam_id__2018-25": 0.4625, - "acc,exam_id__2017-24": 0.3625, - "acc,exam_id__2010-02": 0.42, - "acc,exam_id__2012-08": 0.4125, - "acc,exam_id__2014-15": 0.47435897435897434, - "acc,exam_id__2011-03": 0.35353535353535354, - "acc,exam_id__2013-12": 0.45, - "acc,exam_id__2017-22": 0.5375, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7065502576224769, - "acc,all": 0.7567567567567568 - }, - "tweetsentbr": { - "f1_macro,all": 0.49494721606080616, - "acc,all": 0.7074626865671642, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9227873017727286, + "acc,all": 0.9227941176470589, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7796159438413869, + "mse,all": 0.426936274509804, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5382475660639777, + "acc,exam_id__UNICAMP_2023": 0.6046511627906976, + "acc,exam_id__USP_2019": 0.425, + "acc,exam_id__USP_2018": 0.46296296296296297, + "acc,exam_id__USP_2020": 0.5178571428571429, + "acc,exam_id__UNICAMP_2019": 0.52, + "acc,exam_id__UNICAMP_2022": 0.5897435897435898, + "acc,exam_id__UNICAMP_2018": 0.5370370370370371, + "acc,exam_id__UNICAMP_2020": 0.5818181818181818, + "acc,exam_id__USP_2021": 0.4807692307692308, + "acc,exam_id__USP_2023": 0.5909090909090909, + "acc,exam_id__UNICAMP_2024": 0.5333333333333333, + "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, + "acc,exam_id__USP_2022": 0.4897959183673469, + "acc,exam_id__UNICAMP_2021_2": 0.49019607843137253, + "acc,exam_id__USP_2024": 0.7560975609756098, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6382085374387684, + "acc,exam_id__2017": 0.6724137931034483, + "acc,exam_id__2014": 0.6146788990825688, + "acc,exam_id__2010": 0.6837606837606838, + "acc,exam_id__2011": 0.6666666666666666, + "acc,exam_id__2015": 0.6218487394957983, + "acc,exam_id__2013": 0.6944444444444444, + "acc,exam_id__2023": 0.6370370370370371, + "acc,exam_id__2022": 0.6090225563909775, + "acc,exam_id__2016_2": 0.6097560975609756, + "acc,exam_id__2012": 0.6293103448275862, + "acc,exam_id__2016": 0.5785123966942148, + "acc,exam_id__2009": 0.6521739130434783 + }, + "faquad_nli": { + "f1_macro,all": 0.7812802842683321, + "acc,all": 0.8461538461538461, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8163838486760461, + "acc,all": 0.8207142857142857 + }, + "oab_exams": { + "acc,all": 0.4154897494305239, + "acc,exam_id__2015-17": 0.5, + "acc,exam_id__2015-18": 0.425, + "acc,exam_id__2014-14": 0.525, + "acc,exam_id__2012-09": 0.33766233766233766, + "acc,exam_id__2016-20a": 0.35, + "acc,exam_id__2012-06": 0.475, + "acc,exam_id__2017-23": 0.45, + "acc,exam_id__2010-01": 0.36470588235294116, + "acc,exam_id__2011-04": 0.4, + "acc,exam_id__2011-05": 0.45, + "acc,exam_id__2014-13": 0.325, + "acc,exam_id__2016-20": 0.3625, + "acc,exam_id__2013-10": 0.4, + "acc,exam_id__2016-19": 0.5, + "acc,exam_id__2016-21": 0.3625, + "acc,exam_id__2012-06a": 0.35, + "acc,exam_id__2012-07": 0.3625, + "acc,exam_id__2015-16": 0.375, + "acc,exam_id__2013-11": 0.45, + "acc,exam_id__2018-25": 0.4625, + "acc,exam_id__2017-24": 0.3625, + "acc,exam_id__2010-02": 0.42, + 
"acc,exam_id__2012-08": 0.4125, + "acc,exam_id__2014-15": 0.47435897435897434, + "acc,exam_id__2011-03": 0.35353535353535354, + "acc,exam_id__2013-12": 0.45, + "acc,exam_id__2017-22": 0.5375, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7065502576224769, + "acc,all": 0.7567567567567568 + }, + "tweetsentbr": { + "f1_macro,all": 0.6599296214144083, + "acc,all": 0.7074626865671642, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "bb3bd36fce162f472668dbd91960cd1525b45f30", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 1620.039188243527, 
- "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "bb3bd36fce162f472668dbd91960cd1525b45f30", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Kukedlc/NeuralSynthesis-7b-v0.4-slerp,dtype=bfloat16,device=cuda:0,revision=bb3bd36fce162f472668dbd91960cd1525b45f30,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Kukedlc/NeuralSynthesis-7b-v0.4-slerp,dtype=bfloat16,device=cuda:0,revision=bb3bd36fce162f472668dbd91960cd1525b45f30,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, 
- null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "2d67fba" + "git_hash": "2d67fba" } \ No newline at end of file diff --git a/Kukedlc/NeuralSynthesis-7b-v0.4-slerp/results_2024-06-16T02-26-33.125166.json b/Kukedlc/NeuralSynthesis-7b-v0.4-slerp/results_2024-06-16T02-26-33.125166.json index 6aff69f8c5a3e36fa90683e1df43cc6dd30b7bfa..85b0c3bb272ff6cb19a7162b0b3c5df2c63e2050 100644 --- a/Kukedlc/NeuralSynthesis-7b-v0.4-slerp/results_2024-06-16T02-26-33.125166.json +++ b/Kukedlc/NeuralSynthesis-7b-v0.4-slerp/results_2024-06-16T02-26-33.125166.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6770567450194497, - "all_grouped_npm": 0.5237437902599286, + "all_grouped_average": 0.6953881233920721, + "all_grouped_npm": 0.55102262712395, "all_grouped": { "enem_challenge": 0.6382085374387684, "bluex": 0.5382475660639777, @@ -45,7 +45,7 @@ "faquad_nli": 0.7812802842683321, "hatebr_offensive": 0.8163838486760461, "portuguese_hate_speech": 0.7065502576224769, - "tweetsentbr": 0.49494721606080616 + "tweetsentbr": 0.6599296214144083 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6382085374387684, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7812802842683321, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8163838486760461, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7065502576224769, - "harness|tweetsentbr|tweetsentbr|None|25": 0.49494721606080616 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6599296214144083 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6382085374387684, @@ -150,9 +150,9 @@ "main_score": 0.7065502576224769 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.49494721606080616, + "f1_macro,all": 0.6599296214144083, "acc,all": 0.7074626865671642, - "main_score": 0.49494721606080616 + "main_score": 0.6599296214144083 } }, "config_tasks": { diff --git a/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/raw_2024-08-11T07-21-08.591192/results.json b/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/raw_2024-08-11T07-21-08.591192/results.json index 566478587405e4fec06e4ab1c909e7cbf2db4ff9..524985f2cf686e4b5dcfe94e04eda8427d8f13b9 100644 --- a/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/raw_2024-08-11T07-21-08.591192/results.json +++ b/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/raw_2024-08-11T07-21-08.591192/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.872773896630295, - "acc,all": 0.8729575163398693, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7643433566585385, - "mse,all": 0.479750816993464, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.40333796940194716, - "acc,exam_id__UNICAMP_2020": 0.38181818181818183, - "acc,exam_id__UNICAMP_2022": 0.48717948717948717, - "acc,exam_id__UNICAMP_2021_1": 0.41304347826086957, - "acc,exam_id__USP_2023": 0.5227272727272727, - "acc,exam_id__USP_2018": 0.3888888888888889, - "acc,exam_id__UNICAMP_2023": 0.4186046511627907, - "acc,exam_id__UNICAMP_2024": 0.4, - "acc,exam_id__USP_2021": 0.3269230769230769, - "acc,exam_id__USP_2020": 0.35714285714285715, - "acc,exam_id__UNICAMP_2019": 0.42, - "acc,exam_id__UNICAMP_2018": 0.3333333333333333, - "acc,exam_id__USP_2024": 0.5609756097560976, - "acc,exam_id__UNICAMP_2021_2": 0.35294117647058826, - "acc,exam_id__USP_2022": 0.3877551020408163, - "acc,exam_id__USP_2019": 0.375, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.5675297410776767, - "acc,exam_id__2009": 0.5652173913043478, - 
"acc,exam_id__2023": 0.5851851851851851, - "acc,exam_id__2017": 0.5086206896551724, - "acc,exam_id__2015": 0.5546218487394958, - "acc,exam_id__2010": 0.5641025641025641, - "acc,exam_id__2012": 0.603448275862069, - "acc,exam_id__2016": 0.5454545454545454, - "acc,exam_id__2013": 0.5740740740740741, - "acc,exam_id__2014": 0.5504587155963303, - "acc,exam_id__2022": 0.5639097744360902, - "acc,exam_id__2016_2": 0.5528455284552846, - "acc,exam_id__2011": 0.6410256410256411 - }, - "faquad_nli": { - "f1_macro,all": 0.7424139492753623, - "acc,all": 0.8276923076923077, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7379657769346437, - "acc,all": 0.7471428571428571 - }, - "oab_exams": { - "acc,all": 0.37995444191343963, - "acc,exam_id__2014-14": 0.4625, - "acc,exam_id__2015-16": 0.4, - "acc,exam_id__2013-11": 0.3, - "acc,exam_id__2015-17": 0.44871794871794873, - "acc,exam_id__2016-19": 0.41025641025641024, - "acc,exam_id__2012-09": 0.2857142857142857, - "acc,exam_id__2012-06": 0.325, - "acc,exam_id__2018-25": 0.425, - "acc,exam_id__2015-18": 0.375, - "acc,exam_id__2014-13": 0.3625, - "acc,exam_id__2012-06a": 0.375, - "acc,exam_id__2017-24": 0.4, - "acc,exam_id__2016-21": 0.3625, - "acc,exam_id__2013-12": 0.375, - "acc,exam_id__2011-04": 0.35, - "acc,exam_id__2016-20a": 0.3375, - "acc,exam_id__2011-03": 0.3333333333333333, - "acc,exam_id__2012-07": 0.3125, - "acc,exam_id__2014-15": 0.44871794871794873, - "acc,exam_id__2013-10": 0.25, - "acc,exam_id__2017-23": 0.4625, - "acc,exam_id__2017-22": 0.45, - "acc,exam_id__2012-08": 0.3625, - "acc,exam_id__2011-05": 0.4, - "acc,exam_id__2010-02": 0.49, - "acc,exam_id__2010-01": 0.3176470588235294, - "acc,exam_id__2016-20": 0.425, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.46302470515207944, - "acc,all": 0.7144535840188014 - }, - "tweetsentbr": { - "f1_macro,all": 0.6339659101220709, - "acc,all": 0.6547263681592039, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.872773896630295, + "acc,all": 0.8729575163398693, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7643433566585385, + "mse,all": 0.479750816993464, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.40333796940194716, + "acc,exam_id__UNICAMP_2020": 0.38181818181818183, + "acc,exam_id__UNICAMP_2022": 0.48717948717948717, + "acc,exam_id__UNICAMP_2021_1": 0.41304347826086957, + "acc,exam_id__USP_2023": 0.5227272727272727, + "acc,exam_id__USP_2018": 0.3888888888888889, + "acc,exam_id__UNICAMP_2023": 0.4186046511627907, + "acc,exam_id__UNICAMP_2024": 0.4, + "acc,exam_id__USP_2021": 0.3269230769230769, + "acc,exam_id__USP_2020": 0.35714285714285715, + "acc,exam_id__UNICAMP_2019": 0.42, + "acc,exam_id__UNICAMP_2018": 0.3333333333333333, + "acc,exam_id__USP_2024": 0.5609756097560976, + "acc,exam_id__UNICAMP_2021_2": 0.35294117647058826, + "acc,exam_id__USP_2022": 0.3877551020408163, + "acc,exam_id__USP_2019": 0.375, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.5675297410776767, + "acc,exam_id__2009": 0.5652173913043478, + "acc,exam_id__2023": 0.5851851851851851, + "acc,exam_id__2017": 0.5086206896551724, + "acc,exam_id__2015": 0.5546218487394958, + "acc,exam_id__2010": 0.5641025641025641, + "acc,exam_id__2012": 0.603448275862069, + "acc,exam_id__2016": 0.5454545454545454, + "acc,exam_id__2013": 0.5740740740740741, + "acc,exam_id__2014": 0.5504587155963303, + "acc,exam_id__2022": 0.5639097744360902, + "acc,exam_id__2016_2": 0.5528455284552846, + "acc,exam_id__2011": 0.6410256410256411 + }, + "faquad_nli": { + "f1_macro,all": 0.7424139492753623, + "acc,all": 0.8276923076923077, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7379657769346437, + "acc,all": 0.7471428571428571 + }, + "oab_exams": { + "acc,all": 0.37995444191343963, + "acc,exam_id__2014-14": 0.4625, + "acc,exam_id__2015-16": 0.4, + "acc,exam_id__2013-11": 0.3, + "acc,exam_id__2015-17": 0.44871794871794873, + "acc,exam_id__2016-19": 0.41025641025641024, + "acc,exam_id__2012-09": 0.2857142857142857, + "acc,exam_id__2012-06": 0.325, + "acc,exam_id__2018-25": 0.425, + "acc,exam_id__2015-18": 0.375, + "acc,exam_id__2014-13": 0.3625, + "acc,exam_id__2012-06a": 0.375, + "acc,exam_id__2017-24": 0.4, + "acc,exam_id__2016-21": 0.3625, + "acc,exam_id__2013-12": 0.375, + "acc,exam_id__2011-04": 0.35, + "acc,exam_id__2016-20a": 0.3375, + "acc,exam_id__2011-03": 0.3333333333333333, + "acc,exam_id__2012-07": 0.3125, + "acc,exam_id__2014-15": 0.44871794871794873, + "acc,exam_id__2013-10": 0.25, + "acc,exam_id__2017-23": 0.4625, + 
"acc,exam_id__2017-22": 0.45, + "acc,exam_id__2012-08": 0.3625, + "acc,exam_id__2011-05": 0.4, + "acc,exam_id__2010-02": 0.49, + "acc,exam_id__2010-01": 0.3176470588235294, + "acc,exam_id__2016-20": 0.425, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6945370577281191, + "acc,all": 0.7144535840188014 + }, + "tweetsentbr": { + "f1_macro,all": 0.6339659101220709, + "acc,all": 0.6547263681592039, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "7f15baedd46858153d817445aff032f4d6cf4939", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 15636906240, - "model_num_parameters": 7818448896, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1543.2565359477123, - "min_seq_length": 1520, - "max_seq_length": 1610, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1781.2565359477123, - "min_seq_length": 1758, - "max_seq_length": 1848, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1764.1752433936022, - "min_seq_length": 1394, - "max_seq_length": 2566, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1656.888733379986, - "min_seq_length": 1401, - "max_seq_length": 2666, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1746.66, - "min_seq_length": 1693, - "max_seq_length": 1857, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "7f15baedd46858153d817445aff032f4d6cf4939", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 15636906240, + "model_num_parameters": 7818448896, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1566.8457142857144, - "min_seq_length": 1544, - "max_seq_length": 1809, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1404.804555808656, - "min_seq_length": 1143, - "max_seq_length": 1877, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1543.2565359477123, + "min_seq_length": 1520, + "max_seq_length": 1610, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1781.2565359477123, + "min_seq_length": 1758, + "max_seq_length": 1848, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1764.1752433936022, + "min_seq_length": 1394, + "max_seq_length": 2566, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1656.888733379986, + "min_seq_length": 1401, + "max_seq_length": 2666, + 
"max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1746.66, + "min_seq_length": 1693, + "max_seq_length": 1857, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1566.8457142857144, + "min_seq_length": 1544, + "max_seq_length": 1809, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1404.804555808656, + "min_seq_length": 1143, + "max_seq_length": 1877, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2061.6075205640423, + "min_seq_length": 2027, + "max_seq_length": 2095, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1812.0945273631842, + "min_seq_length": 1793, + "max_seq_length": 1859, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2061.6075205640423, - "min_seq_length": 2027, - "max_seq_length": 2095, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1812.0945273631842, - "min_seq_length": 1793, - "max_seq_length": 1859, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - 
"bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/results_2024-08-11T07-21-08.591192.json b/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/results_2024-08-11T07-21-08.591192.json index 64516e68822524079e347b0a2425dde6141a6986..4e1c8026bb2d554862956e4f2d18a348b2fc2b63 100644 --- a/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/results_2024-08-11T07-21-08.591192.json +++ b/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/results_2024-08-11T07-21-08.591192.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6183677496851171, - "all_grouped_npm": 0.4221956520433525, + "all_grouped_average": 0.6440913444157882, + "all_grouped_npm": 0.47156915440548514, "all_grouped": { "enem_challenge": 0.5675297410776767, "bluex": 0.40333796940194716, @@ -44,7 +44,7 @@ "assin2_sts": 0.7643433566585385, "faquad_nli": 0.7424139492753623, "hatebr_offensive": 0.7379657769346437, - "portuguese_hate_speech": 0.46302470515207944, + "portuguese_hate_speech": 0.6945370577281191, "tweetsentbr": 0.6339659101220709 }, "all": { @@ -55,7 +55,7 @@ "harness|assin2_sts|assin2_sts|None|15": 0.7643433566585385, "harness|faquad_nli|faquad_nli|None|15": 0.7424139492753623, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7379657769346437, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.46302470515207944, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6945370577281191, "harness|tweetsentbr|tweetsentbr|None|25": 0.6339659101220709 }, "harness|enem_challenge|enem_challenge|None|3": { @@ -145,9 +145,9 @@ "main_score": 0.7379657769346437 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.46302470515207944, + "f1_macro,all": 0.6945370577281191, "acc,all": 0.7144535840188014, - "main_score": 0.46302470515207944 + "main_score": 0.6945370577281191 }, "harness|tweetsentbr|tweetsentbr|None|25": { "f1_macro,all": 0.6339659101220709, diff --git a/M4-ai/tau-0.5B/raw_2024-04-23T00-49-38.450870/results.json b/M4-ai/tau-0.5B/raw_2024-04-23T00-49-38.450870/results.json index 9cbbe61dc56bcaa8592bf0573dfe0c968ddaa45e..69ddba34e08fd26573cba687ac9f1392218f2550 100644 --- a/M4-ai/tau-0.5B/raw_2024-04-23T00-49-38.450870/results.json +++ b/M4-ai/tau-0.5B/raw_2024-04-23T00-49-38.450870/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.39285662181494563, - "acc,all": 0.5277777777777778, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.07216057977107923, - "mse,all": 1.894812091503268, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.18915159944367177, - "acc,exam_id__UNICAMP_2020": 0.16363636363636364, - "acc,exam_id__UNICAMP_2023": 0.3023255813953488, - "acc,exam_id__USP_2018": 0.12962962962962962, - "acc,exam_id__USP_2024": 0.12195121951219512, - "acc,exam_id__USP_2019": 0.25, - "acc,exam_id__UNICAMP_2019": 0.16, - "acc,exam_id__USP_2022": 0.1836734693877551, - "acc,exam_id__UNICAMP_2021_1": 0.30434782608695654, - "acc,exam_id__UNICAMP_2022": 0.23076923076923078, - "acc,exam_id__UNICAMP_2024": 0.17777777777777778, - "acc,exam_id__UNICAMP_2018": 0.2037037037037037, - "acc,exam_id__USP_2020": 0.19642857142857142, - "acc,exam_id__UNICAMP_2021_2": 0.13725490196078433, - "acc,exam_id__USP_2021": 0.19230769230769232, - "acc,exam_id__USP_2023": 0.11363636363636363, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.19314205738278517, - 
"acc,exam_id__2009": 0.16521739130434782, - "acc,exam_id__2016": 0.19008264462809918, - "acc,exam_id__2014": 0.2018348623853211, - "acc,exam_id__2011": 0.20512820512820512, - "acc,exam_id__2012": 0.1896551724137931, - "acc,exam_id__2022": 0.21052631578947367, - "acc,exam_id__2017": 0.20689655172413793, - "acc,exam_id__2023": 0.26666666666666666, - "acc,exam_id__2013": 0.16666666666666666, - "acc,exam_id__2010": 0.1623931623931624, - "acc,exam_id__2016_2": 0.1951219512195122, - "acc,exam_id__2015": 0.14285714285714285 - }, - "faquad_nli": { - "f1_macro,all": 0.4396551724137931, - "acc,all": 0.7846153846153846, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.222010481181515, - "acc,all": 0.4992857142857143 - }, - "oab_exams": { - "acc,all": 0.23462414578587698, - "acc,exam_id__2012-09": 0.22077922077922077, - "acc,exam_id__2015-17": 0.24358974358974358, - "acc,exam_id__2012-07": 0.15, - "acc,exam_id__2017-24": 0.225, - "acc,exam_id__2016-20a": 0.3375, - "acc,exam_id__2016-20": 0.225, - "acc,exam_id__2017-22": 0.2625, - "acc,exam_id__2014-13": 0.2125, - "acc,exam_id__2013-10": 0.2625, - "acc,exam_id__2014-14": 0.2875, - "acc,exam_id__2012-06": 0.275, - "acc,exam_id__2013-12": 0.1875, - "acc,exam_id__2018-25": 0.2875, - "acc,exam_id__2013-11": 0.1875, - "acc,exam_id__2015-18": 0.2125, - "acc,exam_id__2012-08": 0.1875, - "acc,exam_id__2017-23": 0.1875, - "acc,exam_id__2011-03": 0.21212121212121213, - "acc,exam_id__2014-15": 0.2564102564102564, - "acc,exam_id__2011-04": 0.2625, - "acc,exam_id__2015-16": 0.225, - "acc,exam_id__2016-21": 0.2375, - "acc,exam_id__2011-05": 0.2875, - "acc,exam_id__2010-02": 0.24, - "acc,exam_id__2012-06a": 0.2, - "acc,exam_id__2016-19": 0.20512820512820512, - "acc,exam_id__2010-01": 0.25882352941176473, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.412292817679558, - "acc,all": 0.7015276145710928 - }, - "tweetsentbr": { - "f1_macro,all": 0.21833154883841985, - "acc,all": 0.33681592039800995, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.39285662181494563, + "acc,all": 0.5277777777777778, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.07216057977107923, + "mse,all": 1.894812091503268, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.18915159944367177, + "acc,exam_id__UNICAMP_2020": 0.16363636363636364, + "acc,exam_id__UNICAMP_2023": 0.3023255813953488, + "acc,exam_id__USP_2018": 0.12962962962962962, + "acc,exam_id__USP_2024": 0.12195121951219512, + "acc,exam_id__USP_2019": 0.25, + "acc,exam_id__UNICAMP_2019": 0.16, + "acc,exam_id__USP_2022": 0.1836734693877551, + "acc,exam_id__UNICAMP_2021_1": 0.30434782608695654, + "acc,exam_id__UNICAMP_2022": 0.23076923076923078, + "acc,exam_id__UNICAMP_2024": 0.17777777777777778, + "acc,exam_id__UNICAMP_2018": 0.2037037037037037, + "acc,exam_id__USP_2020": 0.19642857142857142, + "acc,exam_id__UNICAMP_2021_2": 0.13725490196078433, + "acc,exam_id__USP_2021": 0.19230769230769232, + "acc,exam_id__USP_2023": 0.11363636363636363, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.19314205738278517, + "acc,exam_id__2009": 0.16521739130434782, + "acc,exam_id__2016": 0.19008264462809918, + "acc,exam_id__2014": 0.2018348623853211, + "acc,exam_id__2011": 0.20512820512820512, + "acc,exam_id__2012": 0.1896551724137931, + "acc,exam_id__2022": 0.21052631578947367, + "acc,exam_id__2017": 0.20689655172413793, + "acc,exam_id__2023": 0.26666666666666666, + "acc,exam_id__2013": 0.16666666666666666, + "acc,exam_id__2010": 0.1623931623931624, + "acc,exam_id__2016_2": 0.1951219512195122, + "acc,exam_id__2015": 0.14285714285714285 + }, + "faquad_nli": { + "f1_macro,all": 0.4396551724137931, + "acc,all": 0.7846153846153846, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.33301572177227245, + "acc,all": 0.4992857142857143 + }, + "oab_exams": { + "acc,all": 0.23462414578587698, + "acc,exam_id__2012-09": 0.22077922077922077, + "acc,exam_id__2015-17": 0.24358974358974358, + "acc,exam_id__2012-07": 0.15, + "acc,exam_id__2017-24": 0.225, + "acc,exam_id__2016-20a": 0.3375, + "acc,exam_id__2016-20": 0.225, + "acc,exam_id__2017-22": 0.2625, + "acc,exam_id__2014-13": 0.2125, + "acc,exam_id__2013-10": 0.2625, + "acc,exam_id__2014-14": 0.2875, + "acc,exam_id__2012-06": 0.275, + "acc,exam_id__2013-12": 0.1875, + "acc,exam_id__2018-25": 0.2875, + "acc,exam_id__2013-11": 0.1875, + "acc,exam_id__2015-18": 0.2125, + "acc,exam_id__2012-08": 0.1875, + "acc,exam_id__2017-23": 0.1875, + "acc,exam_id__2011-03": 0.21212121212121213, + "acc,exam_id__2014-15": 0.2564102564102564, + "acc,exam_id__2011-04": 0.2625, + 
"acc,exam_id__2015-16": 0.225, + "acc,exam_id__2016-21": 0.2375, + "acc,exam_id__2011-05": 0.2875, + "acc,exam_id__2010-02": 0.24, + "acc,exam_id__2012-06a": 0.2, + "acc,exam_id__2016-19": 0.20512820512820512, + "acc,exam_id__2010-01": 0.25882352941176473, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.412292817679558, + "acc,all": 0.7015276145710928 + }, + "tweetsentbr": { + "f1_macro,all": 0.21833154883841985, + "acc,all": 0.33681592039800995, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "5d86478db762a26c603eb220da918643c860f54f", - "model_dtype": "torch.float16", - "model_memory_footprint": 1129305088, - "model_num_parameters": 463987712, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 4, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1307.2818627450981, - "min_seq_length": 1288, - "max_seq_length": 1369, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1498.2818627450981, - "min_seq_length": 1479, - "max_seq_length": 1560, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1495.2378303198886, - "min_seq_length": 1175, - "max_seq_length": 2146, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1415.7809657102869, - "min_seq_length": 1185, - "max_seq_length": 2376, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1438.6876923076923, - "min_seq_length": 1394, - "max_seq_length": 1534, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "5d86478db762a26c603eb220da918643c860f54f", + "model_dtype": "torch.float16", + "model_memory_footprint": 1129305088, + "model_num_parameters": 463987712, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 4, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1245.6992857142857, - "min_seq_length": 1227, - "max_seq_length": 1466, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1203.888382687927, - "min_seq_length": 974, - "max_seq_length": 1617, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1307.2818627450981, + "min_seq_length": 1288, + "max_seq_length": 1369, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1498.2818627450981, + "min_seq_length": 1479, + "max_seq_length": 1560, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1495.2378303198886, + "min_seq_length": 1175, + "max_seq_length": 2146, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1415.7809657102869, + "min_seq_length": 1185, + "max_seq_length": 2376, + "max_ctx_length": 2528, + "max_gen_toks": 
32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1438.6876923076923, + "min_seq_length": 1394, + "max_seq_length": 1534, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1245.6992857142857, + "min_seq_length": 1227, + "max_seq_length": 1466, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1203.888382687927, + "min_seq_length": 974, + "max_seq_length": 1617, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1688.62044653349, + "min_seq_length": 1658, + "max_seq_length": 1722, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1539.0567164179104, + "min_seq_length": 1521, + "max_seq_length": 1587, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1688.62044653349, - "min_seq_length": 1658, - "max_seq_length": 1722, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=M4-ai/tau-0.5B,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1539.0567164179104, - "min_seq_length": 1521, - "max_seq_length": 1587, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=M4-ai/tau-0.5B,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "0e4d6ae" + "git_hash": "0e4d6ae" } \ No newline 
at end of file diff --git a/M4-ai/tau-0.5B/results_2024-04-23T00-49-38.450870.json b/M4-ai/tau-0.5B/results_2024-04-23T00-49-38.450870.json index 112bd45a28dc370ea31a6d7e2b1995d4f2fef3aa..917dbbdbf2d1bd8ab2bef084d9e92ab099478289 100644 --- a/M4-ai/tau-0.5B/results_2024-04-23T00-49-38.450870.json +++ b/M4-ai/tau-0.5B/results_2024-04-23T00-49-38.450870.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.26380278047907163, - "all_grouped_npm": -0.12163492659322077, + "all_grouped_average": 0.27613669610026687, + "all_grouped_npm": -0.09696709535083023, "all_grouped": { "enem_challenge": 0.19314205738278517, "bluex": 0.18915159944367177, @@ -43,7 +43,7 @@ "assin2_rte": 0.39285662181494563, "assin2_sts": 0.07216057977107923, "faquad_nli": 0.4396551724137931, - "hatebr_offensive": 0.222010481181515, + "hatebr_offensive": 0.33301572177227245, "portuguese_hate_speech": 0.412292817679558, "tweetsentbr": 0.21833154883841985 }, @@ -54,7 +54,7 @@ "harness|assin2_rte|assin2_rte|None|15": 0.39285662181494563, "harness|assin2_sts|assin2_sts|None|15": 0.07216057977107923, "harness|faquad_nli|faquad_nli|None|15": 0.4396551724137931, - "harness|hatebr_offensive|hatebr_offensive|None|25": 0.222010481181515, + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.33301572177227245, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.412292817679558, "harness|tweetsentbr|tweetsentbr|None|25": 0.21833154883841985 }, @@ -140,9 +140,9 @@ "main_score": 0.4396551724137931 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.222010481181515, + "f1_macro,all": 0.33301572177227245, "acc,all": 0.4992857142857143, - "main_score": 0.222010481181515 + "main_score": 0.33301572177227245 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { "f1_macro,all": 0.412292817679558, diff --git a/M4-ai/tau-1.8B/raw_2024-04-23T02-51-19.597157/results.json b/M4-ai/tau-1.8B/raw_2024-04-23T02-51-19.597157/results.json index c57c9aa0e46a40dc0b1bcfc14920b60a7ffcbe38..28b50ddcede3bc6fa8e71d520afcf0e8bc978e8c 100644 --- a/M4-ai/tau-1.8B/raw_2024-04-23T02-51-19.597157/results.json +++ b/M4-ai/tau-1.8B/raw_2024-04-23T02-51-19.597157/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.6240877656394997, - "acc,all": 0.6311274509803921, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.19269473597203718, - "mse,all": 2.222209967320261, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.23504867872044508, - "acc,exam_id__UNICAMP_2020": 0.23636363636363636, - "acc,exam_id__UNICAMP_2023": 0.2558139534883721, - "acc,exam_id__USP_2018": 0.2222222222222222, - "acc,exam_id__USP_2024": 0.1951219512195122, - "acc,exam_id__USP_2019": 0.25, - "acc,exam_id__UNICAMP_2019": 0.18, - "acc,exam_id__USP_2022": 0.24489795918367346, - "acc,exam_id__UNICAMP_2021_1": 0.21739130434782608, - "acc,exam_id__UNICAMP_2022": 0.3076923076923077, - "acc,exam_id__UNICAMP_2024": 0.2, - "acc,exam_id__UNICAMP_2018": 0.25925925925925924, - "acc,exam_id__USP_2020": 0.3392857142857143, - "acc,exam_id__UNICAMP_2021_2": 0.23529411764705882, - "acc,exam_id__USP_2021": 0.1346153846153846, - "acc,exam_id__USP_2023": 0.25, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.2610216934919524, - "acc,exam_id__2009": 0.23478260869565218, - "acc,exam_id__2016": 0.2809917355371901, - "acc,exam_id__2014": 0.27522935779816515, - "acc,exam_id__2011": 0.23931623931623933, - "acc,exam_id__2012": 0.19827586206896552, - 
"acc,exam_id__2022": 0.2932330827067669, - "acc,exam_id__2017": 0.25, - "acc,exam_id__2023": 0.3333333333333333, - "acc,exam_id__2013": 0.23148148148148148, - "acc,exam_id__2010": 0.27350427350427353, - "acc,exam_id__2016_2": 0.2764227642276423, - "acc,exam_id__2015": 0.226890756302521 - }, - "faquad_nli": { - "f1_macro,all": 0.3987209371824756, - "acc,all": 0.6861538461538461, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.41976405672054573, - "acc,all": 0.4692857142857143 - }, - "oab_exams": { - "acc,all": 0.25466970387243737, - "acc,exam_id__2012-09": 0.23376623376623376, - "acc,exam_id__2015-17": 0.2692307692307692, - "acc,exam_id__2012-07": 0.325, - "acc,exam_id__2017-24": 0.25, - "acc,exam_id__2016-20a": 0.275, - "acc,exam_id__2016-20": 0.275, - "acc,exam_id__2017-22": 0.225, - "acc,exam_id__2014-13": 0.3, - "acc,exam_id__2013-10": 0.3, - "acc,exam_id__2014-14": 0.1875, - "acc,exam_id__2012-06": 0.2875, - "acc,exam_id__2013-12": 0.2375, - "acc,exam_id__2018-25": 0.2125, - "acc,exam_id__2013-11": 0.2375, - "acc,exam_id__2015-18": 0.25, - "acc,exam_id__2012-08": 0.2125, - "acc,exam_id__2017-23": 0.2625, - "acc,exam_id__2011-03": 0.24242424242424243, - "acc,exam_id__2014-15": 0.21794871794871795, - "acc,exam_id__2011-04": 0.2375, - "acc,exam_id__2015-16": 0.2, - "acc,exam_id__2016-21": 0.2375, - "acc,exam_id__2011-05": 0.2625, - "acc,exam_id__2010-02": 0.29, - "acc,exam_id__2012-06a": 0.325, - "acc,exam_id__2016-19": 0.2564102564102564, - "acc,exam_id__2010-01": 0.25882352941176473, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.32018744664167026, - "acc,all": 0.63572267920094 - }, - "tweetsentbr": { - "f1_macro,all": 0.15747184099568803, - "acc,all": 0.4427860696517413, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.6240877656394997, + "acc,all": 0.6311274509803921, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.19269473597203718, + "mse,all": 2.222209967320261, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.23504867872044508, + "acc,exam_id__UNICAMP_2020": 0.23636363636363636, + "acc,exam_id__UNICAMP_2023": 0.2558139534883721, + "acc,exam_id__USP_2018": 0.2222222222222222, + "acc,exam_id__USP_2024": 0.1951219512195122, + "acc,exam_id__USP_2019": 0.25, + "acc,exam_id__UNICAMP_2019": 0.18, + "acc,exam_id__USP_2022": 0.24489795918367346, + "acc,exam_id__UNICAMP_2021_1": 0.21739130434782608, + "acc,exam_id__UNICAMP_2022": 0.3076923076923077, + "acc,exam_id__UNICAMP_2024": 0.2, + "acc,exam_id__UNICAMP_2018": 0.25925925925925924, + "acc,exam_id__USP_2020": 0.3392857142857143, + "acc,exam_id__UNICAMP_2021_2": 0.23529411764705882, + "acc,exam_id__USP_2021": 0.1346153846153846, + "acc,exam_id__USP_2023": 0.25, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.2610216934919524, + "acc,exam_id__2009": 0.23478260869565218, + "acc,exam_id__2016": 0.2809917355371901, + "acc,exam_id__2014": 0.27522935779816515, + "acc,exam_id__2011": 0.23931623931623933, + "acc,exam_id__2012": 0.19827586206896552, + "acc,exam_id__2022": 0.2932330827067669, + "acc,exam_id__2017": 0.25, + "acc,exam_id__2023": 0.3333333333333333, + "acc,exam_id__2013": 0.23148148148148148, + "acc,exam_id__2010": 0.27350427350427353, + "acc,exam_id__2016_2": 0.2764227642276423, + "acc,exam_id__2015": 0.226890756302521 + }, + "faquad_nli": { + "f1_macro,all": 0.5980814057737135, + "acc,all": 0.6861538461538461, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.41976405672054573, + "acc,all": 0.4692857142857143 + }, + "oab_exams": { + "acc,all": 0.25466970387243737, + "acc,exam_id__2012-09": 0.23376623376623376, + "acc,exam_id__2015-17": 0.2692307692307692, + "acc,exam_id__2012-07": 0.325, + "acc,exam_id__2017-24": 0.25, + "acc,exam_id__2016-20a": 0.275, + "acc,exam_id__2016-20": 0.275, + "acc,exam_id__2017-22": 0.225, + "acc,exam_id__2014-13": 0.3, + "acc,exam_id__2013-10": 0.3, + "acc,exam_id__2014-14": 0.1875, + "acc,exam_id__2012-06": 0.2875, + "acc,exam_id__2013-12": 0.2375, + "acc,exam_id__2018-25": 0.2125, + "acc,exam_id__2013-11": 0.2375, + "acc,exam_id__2015-18": 0.25, + "acc,exam_id__2012-08": 0.2125, + "acc,exam_id__2017-23": 0.2625, + "acc,exam_id__2011-03": 0.24242424242424243, + "acc,exam_id__2014-15": 0.21794871794871795, + "acc,exam_id__2011-04": 0.2375, + "acc,exam_id__2015-16": 0.2, + "acc,exam_id__2016-21": 0.2375, + 
"acc,exam_id__2011-05": 0.2625, + "acc,exam_id__2010-02": 0.29, + "acc,exam_id__2012-06a": 0.325, + "acc,exam_id__2016-19": 0.2564102564102564, + "acc,exam_id__2010-01": 0.25882352941176473, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.48028116996250536, + "acc,all": 0.63572267920094 + }, + "tweetsentbr": { + "f1_macro,all": 0.20996245466091737, + "acc,all": 0.4427860696517413, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "9651feef56f7811d48a75cfd4e7bf011476321d5", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 4076316672, - "model_num_parameters": 1836828672, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 4, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1307.2818627450981, - "min_seq_length": 1288, - "max_seq_length": 1369, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1498.2818627450981, - "min_seq_length": 1479, - "max_seq_length": 1560, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1495.2378303198886, - "min_seq_length": 1175, - "max_seq_length": 2146, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1415.7809657102869, - "min_seq_length": 1185, - "max_seq_length": 2376, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1438.6876923076923, - "min_seq_length": 1394, - "max_seq_length": 1534, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "9651feef56f7811d48a75cfd4e7bf011476321d5", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 4076316672, + "model_num_parameters": 1836828672, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 4, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1245.6992857142857, - "min_seq_length": 1227, - "max_seq_length": 1466, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1203.888382687927, - "min_seq_length": 974, - "max_seq_length": 1617, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1307.2818627450981, + "min_seq_length": 1288, + "max_seq_length": 1369, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1498.2818627450981, + "min_seq_length": 1479, + "max_seq_length": 1560, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1495.2378303198886, + "min_seq_length": 1175, + "max_seq_length": 2146, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1415.7809657102869, + "min_seq_length": 1185, + "max_seq_length": 2376, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1438.6876923076923, + "min_seq_length": 1394, + "max_seq_length": 1534, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1245.6992857142857, + "min_seq_length": 1227, + "max_seq_length": 1466, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1203.888382687927, + "min_seq_length": 974, + "max_seq_length": 1617, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1688.62044653349, + "min_seq_length": 1658, + "max_seq_length": 1722, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1539.0567164179104, + "min_seq_length": 1521, + "max_seq_length": 1587, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1688.62044653349, - "min_seq_length": 1658, - "max_seq_length": 1722, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=M4-ai/tau-1.8B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1539.0567164179104, - "min_seq_length": 1521, - "max_seq_length": 1587, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=M4-ai/tau-1.8B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "0e4d6ae" + "git_hash": 
"0e4d6ae" } \ No newline at end of file diff --git a/M4-ai/tau-1.8B/results_2024-04-23T02-51-19.597157.json b/M4-ai/tau-1.8B/results_2024-04-23T02-51-19.597157.json index 249b72f45499c76d72add096acad44b9972f3a14..b96245ccbced6c98a2f56c7a4a16db5087f76c47 100644 --- a/M4-ai/tau-1.8B/results_2024-04-23T02-51-19.597157.json +++ b/M4-ai/tau-1.8B/results_2024-04-23T02-51-19.597157.json @@ -34,18 +34,18 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.3181852065818612, - "all_grouped_npm": -0.0320010321160039, + "all_grouped_average": 0.36395685164600594, + "all_grouped_npm": 0.05153942349668537, "all_grouped": { "enem_challenge": 0.2610216934919524, "bluex": 0.23504867872044508, "oab_exams": 0.25466970387243737, "assin2_rte": 0.6240877656394997, "assin2_sts": 0.19269473597203718, - "faquad_nli": 0.3987209371824756, + "faquad_nli": 0.5980814057737135, "hatebr_offensive": 0.41976405672054573, - "portuguese_hate_speech": 0.32018744664167026, - "tweetsentbr": 0.15747184099568803 + "portuguese_hate_speech": 0.48028116996250536, + "tweetsentbr": 0.20996245466091737 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.2610216934919524, @@ -53,10 +53,10 @@ "harness|oab_exams|oab_exams|None|3": 0.25466970387243737, "harness|assin2_rte|assin2_rte|None|15": 0.6240877656394997, "harness|assin2_sts|assin2_sts|None|15": 0.19269473597203718, - "harness|faquad_nli|faquad_nli|None|15": 0.3987209371824756, + "harness|faquad_nli|faquad_nli|None|15": 0.5980814057737135, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.41976405672054573, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.32018744664167026, - "harness|tweetsentbr|tweetsentbr|None|25": 0.15747184099568803 + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.48028116996250536, + "harness|tweetsentbr|tweetsentbr|None|25": 0.20996245466091737 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.2610216934919524, @@ -135,9 +135,9 @@ "main_score": 0.19269473597203718 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.3987209371824756, + "f1_macro,all": 0.5980814057737135, "acc,all": 0.6861538461538461, - "main_score": 0.3987209371824756 + "main_score": 0.5980814057737135 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { "f1_macro,all": 0.41976405672054573, @@ -145,14 +145,14 @@ "main_score": 0.41976405672054573 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.32018744664167026, + "f1_macro,all": 0.48028116996250536, "acc,all": 0.63572267920094, - "main_score": 0.32018744664167026 + "main_score": 0.48028116996250536 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.15747184099568803, + "f1_macro,all": 0.20996245466091737, "acc,all": 0.4427860696517413, - "main_score": 0.15747184099568803 + "main_score": 0.20996245466091737 } }, "config_tasks": { diff --git a/MTSAIR/multi_verse_model/raw_2024-05-26T23-29-38.848309/results.json b/MTSAIR/multi_verse_model/raw_2024-05-26T23-29-38.848309/results.json index 02c310f904d5737f8782d233080591b99c896f52..a18521ae4b0a52311b64ee69ded3035b0ab09d83 100644 --- a/MTSAIR/multi_verse_model/raw_2024-05-26T23-29-38.848309/results.json +++ b/MTSAIR/multi_verse_model/raw_2024-05-26T23-29-38.848309/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.6150413223140495, - "acc,all": 0.9223856209150327, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7909251769543553, - "mse,all": 0.4085235816993464, - "alias": 
"assin2_sts" - }, - "bluex": { - "acc,all": 0.006954102920723227, - "acc,exam_id__UNICAMP_2019": 0.02, - "acc,exam_id__USP_2021": 0.0, - "acc,exam_id__UNICAMP_2024": 0.0, - "acc,exam_id__USP_2019": 0.0, - "acc,exam_id__UNICAMP_2022": 0.0, - "acc,exam_id__USP_2018": 0.0, - "acc,exam_id__UNICAMP_2020": 0.0, - "acc,exam_id__USP_2022": 0.0, - "acc,exam_id__USP_2023": 0.06818181818181818, - "acc,exam_id__UNICAMP_2023": 0.0, - "acc,exam_id__USP_2024": 0.024390243902439025, - "acc,exam_id__UNICAMP_2018": 0.0, - "acc,exam_id__UNICAMP_2021_1": 0.0, - "acc,exam_id__USP_2020": 0.0, - "acc,exam_id__UNICAMP_2021_2": 0.0, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.05598320503848846, - "acc,exam_id__2017": 0.0603448275862069, - "acc,exam_id__2013": 0.037037037037037035, - "acc,exam_id__2016": 0.04132231404958678, - "acc,exam_id__2010": 0.06837606837606838, - "acc,exam_id__2011": 0.06837606837606838, - "acc,exam_id__2009": 0.1391304347826087, - "acc,exam_id__2022": 0.022556390977443608, - "acc,exam_id__2015": 0.058823529411764705, - "acc,exam_id__2014": 0.027522935779816515, - "acc,exam_id__2023": 0.07407407407407407, - "acc,exam_id__2016_2": 0.06504065040650407, - "acc,exam_id__2012": 0.008620689655172414 - }, - "faquad_nli": { - "f1_macro,all": 0.794365153254454, - "acc,all": 0.8384615384615385, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8432720746227155, - "acc,all": 0.845 - }, - "oab_exams": { - "acc,all": 0.0888382687927107, - "acc,exam_id__2012-06": 0.0625, - "acc,exam_id__2014-15": 0.15384615384615385, - "acc,exam_id__2012-06a": 0.0375, - "acc,exam_id__2010-01": 0.0, - "acc,exam_id__2013-10": 0.1125, - "acc,exam_id__2011-03": 0.030303030303030304, - "acc,exam_id__2018-25": 0.1, - "acc,exam_id__2017-23": 0.1375, - "acc,exam_id__2013-11": 0.15, - "acc,exam_id__2014-13": 0.075, - "acc,exam_id__2017-22": 0.1375, - "acc,exam_id__2010-02": 0.07, - "acc,exam_id__2014-14": 0.0875, - "acc,exam_id__2012-09": 0.1038961038961039, - "acc,exam_id__2016-19": 0.14102564102564102, - "acc,exam_id__2012-07": 0.025, - "acc,exam_id__2011-04": 0.05, - "acc,exam_id__2011-05": 0.0875, - "acc,exam_id__2016-20a": 0.0625, - "acc,exam_id__2013-12": 0.1125, - "acc,exam_id__2017-24": 0.1, - "acc,exam_id__2015-18": 0.1125, - "acc,exam_id__2016-21": 0.0625, - "acc,exam_id__2015-17": 0.15384615384615385, - "acc,exam_id__2012-08": 0.0875, - "acc,exam_id__2016-20": 0.0625, - "acc,exam_id__2015-16": 0.1125, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6891172946940052, - "acc,all": 0.7309048178613397 - }, - "tweetsentbr": { - "f1_macro,all": 0.4973147955650199, - "acc,all": 0.7074626865671642, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9225619834710744, + "acc,all": 0.9223856209150327, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7909251769543553, + "mse,all": 0.4085235816993464, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.006954102920723227, + "acc,exam_id__UNICAMP_2019": 0.02, + "acc,exam_id__USP_2021": 0.0, + "acc,exam_id__UNICAMP_2024": 0.0, + "acc,exam_id__USP_2019": 0.0, + "acc,exam_id__UNICAMP_2022": 0.0, + "acc,exam_id__USP_2018": 0.0, + "acc,exam_id__UNICAMP_2020": 0.0, + "acc,exam_id__USP_2022": 0.0, + "acc,exam_id__USP_2023": 0.06818181818181818, + "acc,exam_id__UNICAMP_2023": 0.0, + "acc,exam_id__USP_2024": 0.024390243902439025, + "acc,exam_id__UNICAMP_2018": 0.0, + "acc,exam_id__UNICAMP_2021_1": 0.0, + "acc,exam_id__USP_2020": 0.0, + "acc,exam_id__UNICAMP_2021_2": 0.0, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.05598320503848846, + "acc,exam_id__2017": 0.0603448275862069, + "acc,exam_id__2013": 0.037037037037037035, + "acc,exam_id__2016": 0.04132231404958678, + "acc,exam_id__2010": 0.06837606837606838, + "acc,exam_id__2011": 0.06837606837606838, + "acc,exam_id__2009": 0.1391304347826087, + "acc,exam_id__2022": 0.022556390977443608, + "acc,exam_id__2015": 0.058823529411764705, + "acc,exam_id__2014": 0.027522935779816515, + "acc,exam_id__2023": 0.07407407407407407, + "acc,exam_id__2016_2": 0.06504065040650407, + "acc,exam_id__2012": 0.008620689655172414 + }, + "faquad_nli": { + "f1_macro,all": 0.794365153254454, + "acc,all": 0.8384615384615385, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8432720746227155, + "acc,all": 0.845 + }, + "oab_exams": { + "acc,all": 0.0888382687927107, + "acc,exam_id__2012-06": 0.0625, + "acc,exam_id__2014-15": 0.15384615384615385, + "acc,exam_id__2012-06a": 0.0375, + "acc,exam_id__2010-01": 0.0, + "acc,exam_id__2013-10": 0.1125, + "acc,exam_id__2011-03": 0.030303030303030304, + "acc,exam_id__2018-25": 0.1, + "acc,exam_id__2017-23": 0.1375, + "acc,exam_id__2013-11": 0.15, + "acc,exam_id__2014-13": 0.075, + "acc,exam_id__2017-22": 0.1375, + "acc,exam_id__2010-02": 0.07, + "acc,exam_id__2014-14": 0.0875, + "acc,exam_id__2012-09": 0.1038961038961039, + "acc,exam_id__2016-19": 0.14102564102564102, + "acc,exam_id__2012-07": 0.025, + "acc,exam_id__2011-04": 0.05, + "acc,exam_id__2011-05": 0.0875, + "acc,exam_id__2016-20a": 0.0625, + "acc,exam_id__2013-12": 0.1125, + "acc,exam_id__2017-24": 0.1, + "acc,exam_id__2015-18": 0.1125, + "acc,exam_id__2016-21": 0.0625, + "acc,exam_id__2015-17": 0.15384615384615385, + "acc,exam_id__2012-08": 0.0875, + "acc,exam_id__2016-20": 
0.0625, + "acc,exam_id__2015-16": 0.1125, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6891172946940052, + "acc,all": 0.7309048178613397 + }, + "tweetsentbr": { + "f1_macro,all": 0.6630863940866932, + "acc,all": 0.7074626865671642, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "a4ca706d1bbc263b95e223a80ad68b0f125840b3", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1516.7455065359477, - "min_seq_length": 1493, - "max_seq_length": 1583, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1725.7455065359477, - "min_seq_length": 1702, - "max_seq_length": 1792, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1758.9262865090404, - "min_seq_length": 1382, - "max_seq_length": 2559, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1659.039188243527, - "min_seq_length": 1393, - "max_seq_length": 2657, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1741.9876923076922, - "min_seq_length": 1686, - "max_seq_length": 1862, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "a4ca706d1bbc263b95e223a80ad68b0f125840b3", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1542.3878571428572, - "min_seq_length": 1519, - "max_seq_length": 1793, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1404.764464692483, - "min_seq_length": 1138, - "max_seq_length": 1907, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1516.7455065359477, + "min_seq_length": 1493, + "max_seq_length": 1583, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1725.7455065359477, + "min_seq_length": 1702, + "max_seq_length": 1792, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1758.9262865090404, + "min_seq_length": 1382, + "max_seq_length": 2559, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1659.039188243527, + "min_seq_length": 1393, + 
"max_seq_length": 2657, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1741.9876923076922, + "min_seq_length": 1686, + "max_seq_length": 1862, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1542.3878571428572, + "min_seq_length": 1519, + "max_seq_length": 1793, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1404.764464692483, + "min_seq_length": 1138, + "max_seq_length": 1907, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2043.3360752056403, + "min_seq_length": 2008, + "max_seq_length": 2082, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1789.2492537313433, + "min_seq_length": 1768, + "max_seq_length": 1884, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2043.3360752056403, - "min_seq_length": 2008, - "max_seq_length": 2082, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=MTSAIR/multi_verse_model,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1789.2492537313433, - "min_seq_length": 1768, - "max_seq_length": 1884, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=MTSAIR/multi_verse_model,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - 
"bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/MTSAIR/multi_verse_model/results_2024-05-26T23-29-38.848309.json b/MTSAIR/multi_verse_model/results_2024-05-26T23-29-38.848309.json index 0e1e512e5e21bb68053cd6873ad5165f34d7538d..a274519eed3c5299e01788fecaeea1f0b0fc2ec2 100644 --- a/MTSAIR/multi_verse_model/results_2024-05-26T23-29-38.848309.json +++ b/MTSAIR/multi_verse_model/results_2024-05-26T23-29-38.848309.json @@ -34,29 +34,29 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.4868679326840579, - "all_grouped_npm": 0.2565051759229537, + "all_grouped_average": 0.5394559615372466, + "all_grouped_npm": 0.35225242577690785, "all_grouped": { "enem_challenge": 0.05598320503848846, "bluex": 0.006954102920723227, "oab_exams": 0.0888382687927107, - "assin2_rte": 0.6150413223140495, + "assin2_rte": 0.9225619834710744, "assin2_sts": 0.7909251769543553, "faquad_nli": 0.794365153254454, "hatebr_offensive": 0.8432720746227155, "portuguese_hate_speech": 0.6891172946940052, - "tweetsentbr": 0.4973147955650199 + "tweetsentbr": 0.6630863940866932 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.05598320503848846, "harness|bluex|bluex|None|3": 0.006954102920723227, "harness|oab_exams|oab_exams|None|3": 0.0888382687927107, - "harness|assin2_rte|assin2_rte|None|15": 0.6150413223140495, + "harness|assin2_rte|assin2_rte|None|15": 0.9225619834710744, "harness|assin2_sts|assin2_sts|None|15": 0.7909251769543553, "harness|faquad_nli|faquad_nli|None|15": 0.794365153254454, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8432720746227155, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6891172946940052, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4973147955650199 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6630863940866932 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.05598320503848846, @@ -125,9 +125,9 @@ "main_score": 0.0888382687927107 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.6150413223140495, + "f1_macro,all": 0.9225619834710744, "acc,all": 0.9223856209150327, - "main_score": 0.6150413223140495 + "main_score": 0.9225619834710744 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.7909251769543553, @@ -150,9 +150,9 @@ "main_score": 0.6891172946940052 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4973147955650199, + "f1_macro,all": 0.6630863940866932, "acc,all": 0.7074626865671642, - "main_score": 0.4973147955650199 + "main_score": 0.6630863940866932 } }, "config_tasks": { diff --git a/Magpie-Align/Llama-3-8B-Magpie-Align-v0.3/raw_2024-08-08T06-53-40.680854/results.json b/Magpie-Align/Llama-3-8B-Magpie-Align-v0.3/raw_2024-08-08T06-53-40.680854/results.json index d4d8268f3540b218fe76803a6d32c4348e32c9c6..ea8a58acfe03ef2328f61a3e066a7524162f8dd0 100644 --- a/Magpie-Align/Llama-3-8B-Magpie-Align-v0.3/raw_2024-08-08T06-53-40.680854/results.json +++ b/Magpie-Align/Llama-3-8B-Magpie-Align-v0.3/raw_2024-08-08T06-53-40.680854/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.5823414104527318, - "acc,all": 0.860702614379085, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.616645726427636, - "mse,all": 0.9795420724968137, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5312934631432545, - "acc,exam_id__UNICAMP_2018": 0.48148148148148145, - "acc,exam_id__UNICAMP_2019": 0.58, - "acc,exam_id__UNICAMP_2022": 0.5897435897435898, - 
"acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, - "acc,exam_id__UNICAMP_2020": 0.5272727272727272, - "acc,exam_id__USP_2018": 0.46296296296296297, - "acc,exam_id__UNICAMP_2023": 0.5581395348837209, - "acc,exam_id__USP_2020": 0.5535714285714286, - "acc,exam_id__UNICAMP_2024": 0.6, - "acc,exam_id__USP_2022": 0.5102040816326531, - "acc,exam_id__USP_2019": 0.45, - "acc,exam_id__USP_2021": 0.4423076923076923, - "acc,exam_id__USP_2024": 0.5853658536585366, - "acc,exam_id__USP_2023": 0.5681818181818182, - "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6452064380685795, - "acc,exam_id__2013": 0.6944444444444444, - "acc,exam_id__2011": 0.6324786324786325, - "acc,exam_id__2014": 0.6330275229357798, - "acc,exam_id__2016": 0.5950413223140496, - "acc,exam_id__2016_2": 0.5853658536585366, - "acc,exam_id__2023": 0.7407407407407407, - "acc,exam_id__2022": 0.6015037593984962, - "acc,exam_id__2017": 0.6293103448275862, - "acc,exam_id__2015": 0.6386554621848739, - "acc,exam_id__2010": 0.6581196581196581, - "acc,exam_id__2012": 0.646551724137931, - "acc,exam_id__2009": 0.6869565217391305 - }, - "faquad_nli": { - "f1_macro,all": 0.408987684813557, - "acc,all": 0.6369230769230769, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.5497233119430516, - "acc,all": 0.8221428571428572 - }, - "oab_exams": { - "acc,all": 0.45148063781321185, - "acc,exam_id__2012-07": 0.5125, - "acc,exam_id__2011-04": 0.425, - "acc,exam_id__2012-08": 0.4125, - "acc,exam_id__2017-24": 0.325, - "acc,exam_id__2015-16": 0.4, - "acc,exam_id__2012-06a": 0.5, - "acc,exam_id__2015-18": 0.4125, - "acc,exam_id__2016-19": 0.46153846153846156, - "acc,exam_id__2011-05": 0.4125, - "acc,exam_id__2010-02": 0.5, - "acc,exam_id__2017-23": 0.425, - "acc,exam_id__2013-11": 0.4625, - "acc,exam_id__2016-20a": 0.375, - "acc,exam_id__2016-20": 0.475, - "acc,exam_id__2011-03": 0.40404040404040403, - "acc,exam_id__2015-17": 0.5769230769230769, - "acc,exam_id__2014-14": 0.5125, - "acc,exam_id__2013-10": 0.4625, - "acc,exam_id__2017-22": 0.5125, - "acc,exam_id__2014-15": 0.6153846153846154, - "acc,exam_id__2013-12": 0.5625, - "acc,exam_id__2014-13": 0.4, - "acc,exam_id__2012-09": 0.36363636363636365, - "acc,exam_id__2018-25": 0.4375, - "acc,exam_id__2012-06": 0.475, - "acc,exam_id__2016-21": 0.375, - "acc,exam_id__2010-01": 0.4, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.3837832750665016, - "acc,all": 0.5757931844888367 - }, - "tweetsentbr": { - "f1_macro,all": 0.4443702918727547, - "acc,all": 0.6830845771144278, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.8735121156790977, + "acc,all": 0.860702614379085, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.616645726427636, + "mse,all": 0.9795420724968137, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5312934631432545, + "acc,exam_id__UNICAMP_2018": 0.48148148148148145, + "acc,exam_id__UNICAMP_2019": 0.58, + "acc,exam_id__UNICAMP_2022": 0.5897435897435898, + "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, + "acc,exam_id__UNICAMP_2020": 0.5272727272727272, + "acc,exam_id__USP_2018": 0.46296296296296297, + "acc,exam_id__UNICAMP_2023": 0.5581395348837209, + "acc,exam_id__USP_2020": 0.5535714285714286, + "acc,exam_id__UNICAMP_2024": 0.6, + "acc,exam_id__USP_2022": 0.5102040816326531, + "acc,exam_id__USP_2019": 0.45, + "acc,exam_id__USP_2021": 0.4423076923076923, + "acc,exam_id__USP_2024": 0.5853658536585366, + "acc,exam_id__USP_2023": 0.5681818181818182, + "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6452064380685795, + "acc,exam_id__2013": 0.6944444444444444, + "acc,exam_id__2011": 0.6324786324786325, + "acc,exam_id__2014": 0.6330275229357798, + "acc,exam_id__2016": 0.5950413223140496, + "acc,exam_id__2016_2": 0.5853658536585366, + "acc,exam_id__2023": 0.7407407407407407, + "acc,exam_id__2022": 0.6015037593984962, + "acc,exam_id__2017": 0.6293103448275862, + "acc,exam_id__2015": 0.6386554621848739, + "acc,exam_id__2010": 0.6581196581196581, + "acc,exam_id__2012": 0.646551724137931, + "acc,exam_id__2009": 0.6869565217391305 + }, + "faquad_nli": { + "f1_macro,all": 0.6134815272203353, + "acc,all": 0.6369230769230769, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8245849679145775, + "acc,all": 0.8221428571428572 + }, + "oab_exams": { + "acc,all": 0.45148063781321185, + "acc,exam_id__2012-07": 0.5125, + "acc,exam_id__2011-04": 0.425, + "acc,exam_id__2012-08": 0.4125, + "acc,exam_id__2017-24": 0.325, + "acc,exam_id__2015-16": 0.4, + "acc,exam_id__2012-06a": 0.5, + "acc,exam_id__2015-18": 0.4125, + "acc,exam_id__2016-19": 0.46153846153846156, + "acc,exam_id__2011-05": 0.4125, + "acc,exam_id__2010-02": 0.5, + "acc,exam_id__2017-23": 0.425, + "acc,exam_id__2013-11": 0.4625, + "acc,exam_id__2016-20a": 0.375, + "acc,exam_id__2016-20": 0.475, + "acc,exam_id__2011-03": 0.40404040404040403, + "acc,exam_id__2015-17": 0.5769230769230769, + "acc,exam_id__2014-14": 0.5125, + "acc,exam_id__2013-10": 0.4625, + "acc,exam_id__2017-22": 0.5125, + "acc,exam_id__2014-15": 0.6153846153846154, + "acc,exam_id__2013-12": 0.5625, + "acc,exam_id__2014-13": 0.4, + 
"acc,exam_id__2012-09": 0.36363636363636365, + "acc,exam_id__2018-25": 0.4375, + "acc,exam_id__2012-06": 0.475, + "acc,exam_id__2016-21": 0.375, + "acc,exam_id__2010-01": 0.4, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.5756749125997527, + "acc,all": 0.5757931844888367 + }, + "tweetsentbr": { + "f1_macro,all": 0.5924937224970063, + "acc,all": 0.6830845771144278, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "7e420ddd6ff48bf213dcab2a9ddb7845b80dd1aa", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 16060530944, - "model_num_parameters": 8030261248, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1318.5322712418301, - "min_seq_length": 1299, - "max_seq_length": 1382, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1509.5322712418301, - "min_seq_length": 1490, - "max_seq_length": 1573, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1484.769123783032, - "min_seq_length": 1165, - "max_seq_length": 2134, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1412.3547935619315, - "min_seq_length": 1187, - "max_seq_length": 2340, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1447.8215384615385, - "min_seq_length": 1402, - "max_seq_length": 1544, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "7e420ddd6ff48bf213dcab2a9ddb7845b80dd1aa", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 16060530944, + "model_num_parameters": 8030261248, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1279.3878571428572, - "min_seq_length": 1259, - "max_seq_length": 1498, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1220.3772209567198, - "min_seq_length": 988, - "max_seq_length": 1654, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1318.5322712418301, + "min_seq_length": 1299, + "max_seq_length": 1382, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1509.5322712418301, + "min_seq_length": 1490, + "max_seq_length": 1573, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1484.769123783032, + "min_seq_length": 1165, + "max_seq_length": 2134, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1412.3547935619315, + "min_seq_length": 1187, + "max_seq_length": 2340, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1447.8215384615385, + "min_seq_length": 1402, + "max_seq_length": 1544, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1279.3878571428572, + "min_seq_length": 1259, + "max_seq_length": 1498, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1220.3772209567198, + "min_seq_length": 988, + "max_seq_length": 1654, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1676.4195064629848, + "min_seq_length": 1646, + "max_seq_length": 1708, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1537.1537313432837, + "min_seq_length": 1520, + "max_seq_length": 1585, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1676.4195064629848, - "min_seq_length": 1646, - "max_seq_length": 1708, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Magpie-Align/Llama-3-8B-Magpie-Align-v0.3,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1537.1537313432837, - "min_seq_length": 1520, - "max_seq_length": 1585, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Magpie-Align/Llama-3-8B-Magpie-Align-v0.3,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - 
"gen_kwargs": null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/Magpie-Align/Llama-3-8B-Magpie-Align-v0.3/results_2024-08-08T06-53-40.680854.json b/Magpie-Align/Llama-3-8B-Magpie-Align-v0.3/results_2024-08-08T06-53-40.680854.json index 2fed4c8a993abdf99f5ae27675dc44c771bc23b9..2e8543a317390f0ea4520d6c41cb426a16d875fb 100644 --- a/Magpie-Align/Llama-3-8B-Magpie-Align-v0.3/results_2024-08-08T06-53-40.680854.json +++ b/Magpie-Align/Llama-3-8B-Magpie-Align-v0.3/results_2024-08-08T06-53-40.680854.json @@ -34,29 +34,29 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.5126480266223643, - "all_grouped_npm": 0.22279268797189866, + "all_grouped_average": 0.6360415012626057, + "all_grouped_npm": 0.45576028533379187, "all_grouped": { "enem_challenge": 0.6452064380685795, "bluex": 0.5312934631432545, "oab_exams": 0.45148063781321185, - "assin2_rte": 0.5823414104527318, + "assin2_rte": 0.8735121156790977, "assin2_sts": 0.616645726427636, - "faquad_nli": 0.408987684813557, - "hatebr_offensive": 0.5497233119430516, - "portuguese_hate_speech": 0.3837832750665016, - "tweetsentbr": 0.4443702918727547 + "faquad_nli": 0.6134815272203353, + "hatebr_offensive": 0.8245849679145775, + "portuguese_hate_speech": 0.5756749125997527, + "tweetsentbr": 0.5924937224970063 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6452064380685795, "harness|bluex|bluex|None|3": 0.5312934631432545, "harness|oab_exams|oab_exams|None|3": 0.45148063781321185, - "harness|assin2_rte|assin2_rte|None|15": 0.5823414104527318, + "harness|assin2_rte|assin2_rte|None|15": 0.8735121156790977, "harness|assin2_sts|assin2_sts|None|15": 0.616645726427636, - "harness|faquad_nli|faquad_nli|None|15": 0.408987684813557, - "harness|hatebr_offensive|hatebr_offensive|None|25": 0.5497233119430516, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.3837832750665016, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4443702918727547 + "harness|faquad_nli|faquad_nli|None|15": 0.6134815272203353, + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8245849679145775, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.5756749125997527, + "harness|tweetsentbr|tweetsentbr|None|25": 0.5924937224970063 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6452064380685795, @@ -125,9 +125,9 @@ "main_score": 0.45148063781321185 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.5823414104527318, + "f1_macro,all": 0.8735121156790977, "acc,all": 0.860702614379085, - "main_score": 0.5823414104527318 + "main_score": 0.8735121156790977 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.616645726427636, @@ -135,24 +135,24 @@ "main_score": 0.616645726427636 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.408987684813557, + "f1_macro,all": 0.6134815272203353, "acc,all": 0.6369230769230769, - "main_score": 0.408987684813557 + "main_score": 0.6134815272203353 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.5497233119430516, + "f1_macro,all": 0.8245849679145775, "acc,all": 0.8221428571428572, - "main_score": 0.5497233119430516 + "main_score": 0.8245849679145775 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.3837832750665016, + "f1_macro,all": 0.5756749125997527, "acc,all": 0.5757931844888367, - "main_score": 0.3837832750665016 + "main_score": 0.5756749125997527 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 
0.4443702918727547, + "f1_macro,all": 0.5924937224970063, "acc,all": 0.6830845771144278, - "main_score": 0.4443702918727547 + "main_score": 0.5924937224970063 } }, "config_tasks": { diff --git a/MaziyarPanahi/Calme-4x7B-MoE-v0.1/raw_2024-06-16T23-38-24.109054/results.json b/MaziyarPanahi/Calme-4x7B-MoE-v0.1/raw_2024-06-16T23-38-24.109054/results.json index 24f6ab8aafcf4c55e6c3ff6c15beab9cf3caa6cd..a7615c5912d6f7c5d35420c6cf9f10348806fbd5 100644 --- a/MaziyarPanahi/Calme-4x7B-MoE-v0.1/raw_2024-06-16T23-38-24.109054/results.json +++ b/MaziyarPanahi/Calme-4x7B-MoE-v0.1/raw_2024-06-16T23-38-24.109054/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9207511579149721, - "acc,all": 0.920751633986928, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7871041906910172, - "mse,all": 0.4193495533088235, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.0027816411682892906, - "acc,exam_id__UNICAMP_2021_2": 0.0, - "acc,exam_id__USP_2019": 0.0, - "acc,exam_id__USP_2023": 0.0, - "acc,exam_id__UNICAMP_2018": 0.0, - "acc,exam_id__UNICAMP_2020": 0.0, - "acc,exam_id__USP_2018": 0.018518518518518517, - "acc,exam_id__UNICAMP_2023": 0.0, - "acc,exam_id__UNICAMP_2022": 0.0, - "acc,exam_id__UNICAMP_2019": 0.0, - "acc,exam_id__USP_2021": 0.0, - "acc,exam_id__USP_2024": 0.0, - "acc,exam_id__USP_2020": 0.017857142857142856, - "acc,exam_id__USP_2022": 0.0, - "acc,exam_id__UNICAMP_2021_1": 0.0, - "acc,exam_id__UNICAMP_2024": 0.0, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.0013995801259622112, - "acc,exam_id__2014": 0.0, - "acc,exam_id__2023": 0.0, - "acc,exam_id__2011": 0.0, - "acc,exam_id__2010": 0.0, - "acc,exam_id__2017": 0.0, - "acc,exam_id__2022": 0.0, - "acc,exam_id__2009": 0.0, - "acc,exam_id__2015": 0.0, - "acc,exam_id__2013": 0.009259259259259259, - "acc,exam_id__2016": 0.008264462809917356, - "acc,exam_id__2012": 0.0, - "acc,exam_id__2016_2": 0.0 - }, - "faquad_nli": { - "f1_macro,all": 0.7526873404995746, - "acc,all": 0.8107692307692308, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8025288840687826, - "acc,all": 0.8071428571428572 - }, - "oab_exams": { - "acc,all": 0.02232346241457859, - "acc,exam_id__2015-18": 0.025, - "acc,exam_id__2011-04": 0.0, - "acc,exam_id__2014-13": 0.0375, - "acc,exam_id__2013-10": 0.0125, - "acc,exam_id__2013-12": 0.0125, - "acc,exam_id__2012-06a": 0.0125, - "acc,exam_id__2016-20": 0.025, - "acc,exam_id__2010-02": 0.01, - "acc,exam_id__2013-11": 0.0, - "acc,exam_id__2016-21": 0.05, - "acc,exam_id__2012-07": 0.0, - "acc,exam_id__2016-20a": 0.05, - "acc,exam_id__2010-01": 0.03529411764705882, - "acc,exam_id__2016-19": 0.01282051282051282, - "acc,exam_id__2017-23": 0.025, - "acc,exam_id__2012-06": 0.025, - "acc,exam_id__2012-09": 0.025974025974025976, - "acc,exam_id__2012-08": 0.05, - "acc,exam_id__2011-03": 0.020202020202020204, - "acc,exam_id__2015-16": 0.025, - "acc,exam_id__2017-24": 0.025, - "acc,exam_id__2015-17": 0.05128205128205128, - "acc,exam_id__2014-14": 0.0125, - "acc,exam_id__2018-25": 0.0, - "acc,exam_id__2014-15": 0.02564102564102564, - "acc,exam_id__2017-22": 0.0125, - "acc,exam_id__2011-05": 0.025, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6415794091839719, - "acc,all": 0.6615746180963572 - }, - "tweetsentbr": { - "f1_macro,all": 0.4900875471516297, - "acc,all": 0.6985074626865672, - "alias": "tweetsentbr" - } - }, - 
"configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9207511579149721, + "acc,all": 0.920751633986928, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7871041906910172, + "mse,all": 0.4193495533088235, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.0027816411682892906, + "acc,exam_id__UNICAMP_2021_2": 0.0, + "acc,exam_id__USP_2019": 0.0, + "acc,exam_id__USP_2023": 0.0, + "acc,exam_id__UNICAMP_2018": 0.0, + "acc,exam_id__UNICAMP_2020": 0.0, + "acc,exam_id__USP_2018": 0.018518518518518517, + "acc,exam_id__UNICAMP_2023": 0.0, + "acc,exam_id__UNICAMP_2022": 0.0, + "acc,exam_id__UNICAMP_2019": 0.0, + "acc,exam_id__USP_2021": 0.0, + "acc,exam_id__USP_2024": 0.0, + "acc,exam_id__USP_2020": 0.017857142857142856, + "acc,exam_id__USP_2022": 0.0, + "acc,exam_id__UNICAMP_2021_1": 0.0, + "acc,exam_id__UNICAMP_2024": 0.0, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.0013995801259622112, + "acc,exam_id__2014": 0.0, + "acc,exam_id__2023": 0.0, + "acc,exam_id__2011": 0.0, + "acc,exam_id__2010": 0.0, + "acc,exam_id__2017": 0.0, + "acc,exam_id__2022": 0.0, + "acc,exam_id__2009": 0.0, + "acc,exam_id__2015": 0.0, + "acc,exam_id__2013": 0.009259259259259259, + "acc,exam_id__2016": 0.008264462809917356, + "acc,exam_id__2012": 0.0, + "acc,exam_id__2016_2": 0.0 + }, + "faquad_nli": { + "f1_macro,all": 0.7526873404995746, + "acc,all": 0.8107692307692308, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8025288840687826, + "acc,all": 0.8071428571428572 + }, + "oab_exams": { + "acc,all": 0.02232346241457859, + "acc,exam_id__2015-18": 0.025, + "acc,exam_id__2011-04": 0.0, + "acc,exam_id__2014-13": 0.0375, + "acc,exam_id__2013-10": 0.0125, + "acc,exam_id__2013-12": 0.0125, + "acc,exam_id__2012-06a": 0.0125, + "acc,exam_id__2016-20": 0.025, + "acc,exam_id__2010-02": 0.01, + "acc,exam_id__2013-11": 0.0, + "acc,exam_id__2016-21": 0.05, + "acc,exam_id__2012-07": 0.0, + "acc,exam_id__2016-20a": 0.05, + "acc,exam_id__2010-01": 0.03529411764705882, + "acc,exam_id__2016-19": 0.01282051282051282, + "acc,exam_id__2017-23": 0.025, + "acc,exam_id__2012-06": 0.025, + "acc,exam_id__2012-09": 0.025974025974025976, + 
"acc,exam_id__2012-08": 0.05, + "acc,exam_id__2011-03": 0.020202020202020204, + "acc,exam_id__2015-16": 0.025, + "acc,exam_id__2017-24": 0.025, + "acc,exam_id__2015-17": 0.05128205128205128, + "acc,exam_id__2014-14": 0.0125, + "acc,exam_id__2018-25": 0.0, + "acc,exam_id__2014-15": 0.02564102564102564, + "acc,exam_id__2017-22": 0.0125, + "acc,exam_id__2011-05": 0.025, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6415794091839719, + "acc,all": 0.6615746180963572 + }, + "tweetsentbr": { + "f1_macro,all": 0.6534500628688397, + "acc,all": 0.6985074626865672, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 3, - "non_truncated": 14147, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 3, - "has_chat_template": true, - "chat_type": "user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "e2fab90eef37977002947684043f139a1660f519", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 48844259328, - "model_num_parameters": 24153690112, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1451.7455065359477, - "min_seq_length": 1428, - "max_seq_length": 1518, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1675.7455065359477, - "min_seq_length": 1652, - "max_seq_length": 1742, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 1, - "non_truncated": 718, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 1, - "mean_seq_length": 1744.9262865090404, - "min_seq_length": 1368, - "max_seq_length": 2545, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998609179415855 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1645.039188243527, - "min_seq_length": 1379, - "max_seq_length": 2643, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1691.9876923076922, - "min_seq_length": 1636, - "max_seq_length": 1812, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 3, + "non_truncated": 14147, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 3, + "has_chat_template": true, + "chat_type": "user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "e2fab90eef37977002947684043f139a1660f519", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 48844259328, + "model_num_parameters": 24153690112, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1462.3878571428572, - "min_seq_length": 1439, - "max_seq_length": 1713, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1390.764464692483, - "min_seq_length": 1124, - "max_seq_length": 1893, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1451.7455065359477, + "min_seq_length": 1428, + "max_seq_length": 1518, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1675.7455065359477, + "min_seq_length": 1652, + "max_seq_length": 1742, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 1, + "non_truncated": 718, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 1, + "mean_seq_length": 1744.9262865090404, + "min_seq_length": 1368, + "max_seq_length": 2545, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998609179415855 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1645.039188243527, + "min_seq_length": 1379, + "max_seq_length": 2643, 
+ "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1691.9876923076922, + "min_seq_length": 1636, + "max_seq_length": 1812, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1462.3878571428572, + "min_seq_length": 1439, + "max_seq_length": 1713, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1390.764464692483, + "min_seq_length": 1124, + "max_seq_length": 1893, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1963.3360752056403, + "min_seq_length": 1928, + "max_seq_length": 2002, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1709.2492537313433, + "min_seq_length": 1688, + "max_seq_length": 1804, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1963.3360752056403, - "min_seq_length": 1928, - "max_seq_length": 2002, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=MaziyarPanahi/Calme-4x7B-MoE-v0.1,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1709.2492537313433, - "min_seq_length": 1688, - "max_seq_length": 1804, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=MaziyarPanahi/Calme-4x7B-MoE-v0.1,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - 
"bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "2d67fba" + "git_hash": "2d67fba" } \ No newline at end of file diff --git a/MaziyarPanahi/Calme-4x7B-MoE-v0.1/results_2024-06-16T23-38-24.109054.json b/MaziyarPanahi/Calme-4x7B-MoE-v0.1/results_2024-06-16T23-38-24.109054.json index 0724c763430937ee6163d758a5c48241d4c0598c..57745ba989f61b3218afdad6cacddfa52b38de99 100644 --- a/MaziyarPanahi/Calme-4x7B-MoE-v0.1/results_2024-06-16T23-38-24.109054.json +++ b/MaziyarPanahi/Calme-4x7B-MoE-v0.1/results_2024-06-16T23-38-24.109054.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.4912492459131976, - "all_grouped_npm": 0.27708301369938426, + "all_grouped_average": 0.5094006365484431, + "all_grouped_npm": 0.3040940116684997, "all_grouped": { "enem_challenge": 0.0013995801259622112, "bluex": 0.0027816411682892906, @@ -45,7 +45,7 @@ "faquad_nli": 0.7526873404995746, "hatebr_offensive": 0.8025288840687826, "portuguese_hate_speech": 0.6415794091839719, - "tweetsentbr": 0.4900875471516297 + "tweetsentbr": 0.6534500628688397 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.0013995801259622112, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7526873404995746, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8025288840687826, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6415794091839719, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4900875471516297 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6534500628688397 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.0013995801259622112, @@ -150,9 +150,9 @@ "main_score": 0.6415794091839719 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4900875471516297, + "f1_macro,all": 0.6534500628688397, "acc,all": 0.6985074626865672, - "main_score": 0.4900875471516297 + "main_score": 0.6534500628688397 } }, "config_tasks": { diff --git a/MaziyarPanahi/Calme-4x7B-MoE-v0.2/raw_2024-07-13T10-03-53.912535/results.json b/MaziyarPanahi/Calme-4x7B-MoE-v0.2/raw_2024-07-13T10-03-53.912535/results.json index 4a6de5c8ea6aa5a71f37c4cc5c4adfd22299de88..a17536275a9567bb245ff5935a9193b0b6192c9b 100644 --- a/MaziyarPanahi/Calme-4x7B-MoE-v0.2/raw_2024-07-13T10-03-53.912535/results.json +++ b/MaziyarPanahi/Calme-4x7B-MoE-v0.2/raw_2024-07-13T10-03-53.912535/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9195254857821783, - "acc,all": 0.9195261437908496, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7862069927082468, - "mse,all": 0.42019251152818665, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.004172461752433936, - "acc,exam_id__UNICAMP_2021_1": 0.021739130434782608, - "acc,exam_id__UNICAMP_2020": 0.0, - "acc,exam_id__USP_2022": 0.0, - "acc,exam_id__USP_2020": 0.017857142857142856, - "acc,exam_id__USP_2024": 0.0, - "acc,exam_id__USP_2021": 0.0, - "acc,exam_id__UNICAMP_2023": 0.0, - "acc,exam_id__USP_2018": 0.018518518518518517, - "acc,exam_id__UNICAMP_2021_2": 0.0, - "acc,exam_id__UNICAMP_2024": 0.0, - "acc,exam_id__UNICAMP_2018": 0.0, - "acc,exam_id__UNICAMP_2022": 0.0, - "acc,exam_id__USP_2023": 0.0, - "acc,exam_id__USP_2019": 0.0, - "acc,exam_id__UNICAMP_2019": 0.0, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.002099370188943317, - "acc,exam_id__2009": 0.0, - "acc,exam_id__2012": 0.0, - "acc,exam_id__2023": 0.0, - "acc,exam_id__2010": 0.0, - "acc,exam_id__2016_2": 0.0, - "acc,exam_id__2022": 0.0, - "acc,exam_id__2017": 0.0, - 
"acc,exam_id__2013": 0.018518518518518517, - "acc,exam_id__2011": 0.0, - "acc,exam_id__2014": 0.0, - "acc,exam_id__2015": 0.0, - "acc,exam_id__2016": 0.008264462809917356 - }, - "faquad_nli": { - "f1_macro,all": 0.7532485313153441, - "acc,all": 0.8123076923076923, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8110268392962026, - "acc,all": 0.815 - }, - "oab_exams": { - "acc,all": 0.014123006833712985, - "acc,exam_id__2011-03": 0.020202020202020204, - "acc,exam_id__2015-16": 0.0125, - "acc,exam_id__2016-21": 0.0125, - "acc,exam_id__2011-05": 0.025, - "acc,exam_id__2014-14": 0.0125, - "acc,exam_id__2010-02": 0.0, - "acc,exam_id__2016-19": 0.02564102564102564, - "acc,exam_id__2013-10": 0.025, - "acc,exam_id__2014-13": 0.025, - "acc,exam_id__2012-09": 0.025974025974025976, - "acc,exam_id__2016-20a": 0.0, - "acc,exam_id__2012-06": 0.0125, - "acc,exam_id__2013-11": 0.0, - "acc,exam_id__2012-06a": 0.025, - "acc,exam_id__2015-18": 0.025, - "acc,exam_id__2013-12": 0.0, - "acc,exam_id__2018-25": 0.0, - "acc,exam_id__2017-24": 0.025, - "acc,exam_id__2014-15": 0.0, - "acc,exam_id__2010-01": 0.0, - "acc,exam_id__2011-04": 0.0, - "acc,exam_id__2015-17": 0.038461538461538464, - "acc,exam_id__2016-20": 0.0125, - "acc,exam_id__2017-23": 0.0125, - "acc,exam_id__2012-07": 0.0, - "acc,exam_id__2012-08": 0.025, - "acc,exam_id__2017-22": 0.025, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6406166840217518, - "acc,all": 0.6568742655699178 - }, - "tweetsentbr": { - "f1_macro,all": 0.4786514099562765, - "acc,all": 0.6915422885572139, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9195254857821783, + "acc,all": 0.9195261437908496, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7862069927082468, + "mse,all": 0.42019251152818665, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.004172461752433936, + "acc,exam_id__UNICAMP_2021_1": 0.021739130434782608, + "acc,exam_id__UNICAMP_2020": 0.0, + "acc,exam_id__USP_2022": 0.0, + "acc,exam_id__USP_2020": 0.017857142857142856, + "acc,exam_id__USP_2024": 0.0, + "acc,exam_id__USP_2021": 0.0, + "acc,exam_id__UNICAMP_2023": 0.0, + "acc,exam_id__USP_2018": 0.018518518518518517, + "acc,exam_id__UNICAMP_2021_2": 0.0, + "acc,exam_id__UNICAMP_2024": 0.0, + "acc,exam_id__UNICAMP_2018": 0.0, + "acc,exam_id__UNICAMP_2022": 0.0, + "acc,exam_id__USP_2023": 0.0, + "acc,exam_id__USP_2019": 0.0, + "acc,exam_id__UNICAMP_2019": 0.0, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.002099370188943317, + "acc,exam_id__2009": 0.0, + "acc,exam_id__2012": 0.0, + "acc,exam_id__2023": 0.0, + "acc,exam_id__2010": 0.0, + "acc,exam_id__2016_2": 0.0, + "acc,exam_id__2022": 0.0, + "acc,exam_id__2017": 0.0, + "acc,exam_id__2013": 0.018518518518518517, + "acc,exam_id__2011": 0.0, + "acc,exam_id__2014": 0.0, + "acc,exam_id__2015": 0.0, + "acc,exam_id__2016": 0.008264462809917356 + }, + "faquad_nli": { + "f1_macro,all": 0.7532485313153441, + "acc,all": 0.8123076923076923, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8110268392962026, + "acc,all": 0.815 + }, + "oab_exams": { + "acc,all": 0.014123006833712985, + "acc,exam_id__2011-03": 0.020202020202020204, + "acc,exam_id__2015-16": 0.0125, + "acc,exam_id__2016-21": 0.0125, + "acc,exam_id__2011-05": 0.025, + "acc,exam_id__2014-14": 0.0125, + "acc,exam_id__2010-02": 0.0, + "acc,exam_id__2016-19": 0.02564102564102564, + "acc,exam_id__2013-10": 0.025, + "acc,exam_id__2014-13": 0.025, + "acc,exam_id__2012-09": 0.025974025974025976, + "acc,exam_id__2016-20a": 0.0, + "acc,exam_id__2012-06": 0.0125, + "acc,exam_id__2013-11": 0.0, + "acc,exam_id__2012-06a": 0.025, + "acc,exam_id__2015-18": 0.025, + "acc,exam_id__2013-12": 0.0, + "acc,exam_id__2018-25": 0.0, + "acc,exam_id__2017-24": 0.025, + "acc,exam_id__2014-15": 0.0, + "acc,exam_id__2010-01": 0.0, + "acc,exam_id__2011-04": 0.0, + "acc,exam_id__2015-17": 0.038461538461538464, + "acc,exam_id__2016-20": 0.0125, + "acc,exam_id__2017-23": 0.0125, + "acc,exam_id__2012-07": 0.0, + "acc,exam_id__2012-08": 0.025, + "acc,exam_id__2017-22": 0.025, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 
0.6406166840217518, + "acc,all": 0.6568742655699178 + }, + "tweetsentbr": { + "f1_macro,all": 0.638201879941702, + "acc,all": 0.6915422885572139, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 3, - "non_truncated": 14147, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 3, - "has_chat_template": true, - "chat_type": "user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "ffef41baf94b3f88b30cf0aeb3fd72d9e4187161", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 48844259328, - "model_num_parameters": 24153690112, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 1, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1451.7455065359477, - "min_seq_length": 1428, - "max_seq_length": 1518, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1675.7455065359477, - "min_seq_length": 1652, - "max_seq_length": 1742, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 1, - "non_truncated": 718, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 1, - "mean_seq_length": 1744.9262865090404, - "min_seq_length": 1368, - "max_seq_length": 2545, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998609179415855 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1645.039188243527, - "min_seq_length": 1379, - "max_seq_length": 2643, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1691.9876923076922, - "min_seq_length": 1636, - "max_seq_length": 1812, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 3, + "non_truncated": 14147, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 3, + "has_chat_template": true, + "chat_type": "user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "ffef41baf94b3f88b30cf0aeb3fd72d9e4187161", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 48844259328, + "model_num_parameters": 24153690112, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 1, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1462.3878571428572, - "min_seq_length": 1439, - "max_seq_length": 1713, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1390.764464692483, - "min_seq_length": 1124, - "max_seq_length": 1893, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1451.7455065359477, + "min_seq_length": 1428, + "max_seq_length": 1518, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1675.7455065359477, + "min_seq_length": 1652, + "max_seq_length": 1742, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 1, + "non_truncated": 718, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 1, + "mean_seq_length": 1744.9262865090404, + "min_seq_length": 1368, + "max_seq_length": 2545, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998609179415855 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1645.039188243527, + "min_seq_length": 1379, + "max_seq_length": 2643, 
+ "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1691.9876923076922, + "min_seq_length": 1636, + "max_seq_length": 1812, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1462.3878571428572, + "min_seq_length": 1439, + "max_seq_length": 1713, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1390.764464692483, + "min_seq_length": 1124, + "max_seq_length": 1893, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1963.3360752056403, + "min_seq_length": 1928, + "max_seq_length": 2002, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1709.2492537313433, + "min_seq_length": 1688, + "max_seq_length": 1804, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1963.3360752056403, - "min_seq_length": 1928, - "max_seq_length": 2002, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=MaziyarPanahi/Calme-4x7B-MoE-v0.2,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1709.2492537313433, - "min_seq_length": 1688, - "max_seq_length": 1804, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=MaziyarPanahi/Calme-4x7B-MoE-v0.2,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - 
"bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/MaziyarPanahi/Calme-4x7B-MoE-v0.2/results_2024-07-13T10-03-53.912535.json b/MaziyarPanahi/Calme-4x7B-MoE-v0.2/results_2024-07-13T10-03-53.912535.json index 80e7450f13d0acf0b6f1d41e9d8a0237a0052ea4..7e1ff167fa4de15ef11796167573acb2ec35b9f7 100644 --- a/MaziyarPanahi/Calme-4x7B-MoE-v0.2/results_2024-07-13T10-03-53.912535.json +++ b/MaziyarPanahi/Calme-4x7B-MoE-v0.2/results_2024-07-13T10-03-53.912535.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.48996342020612105, - "all_grouped_npm": 0.2756995104410953, + "all_grouped_average": 0.5076912502045017, + "all_grouped_npm": 0.3020802098434474, "all_grouped": { "enem_challenge": 0.002099370188943317, "bluex": 0.004172461752433936, @@ -45,7 +45,7 @@ "faquad_nli": 0.7532485313153441, "hatebr_offensive": 0.8110268392962026, "portuguese_hate_speech": 0.6406166840217518, - "tweetsentbr": 0.4786514099562765 + "tweetsentbr": 0.638201879941702 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.002099370188943317, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7532485313153441, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8110268392962026, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6406166840217518, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4786514099562765 + "harness|tweetsentbr|tweetsentbr|None|25": 0.638201879941702 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.002099370188943317, @@ -150,9 +150,9 @@ "main_score": 0.6406166840217518 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4786514099562765, + "f1_macro,all": 0.638201879941702, "acc,all": 0.6915422885572139, - "main_score": 0.4786514099562765 + "main_score": 0.638201879941702 } }, "config_tasks": { diff --git a/MaziyarPanahi/Llama-3-8B-Instruct-v0.10/raw_2024-08-13T22-35-47.080828/results.json b/MaziyarPanahi/Llama-3-8B-Instruct-v0.10/raw_2024-08-13T22-35-47.080828/results.json index 709d7a6ccd370b04c48caf659600237762defbe9..a91e47e9009f9f300bcca79243902a7d6d1c67c0 100644 --- a/MaziyarPanahi/Llama-3-8B-Instruct-v0.10/raw_2024-08-13T22-35-47.080828/results.json +++ b/MaziyarPanahi/Llama-3-8B-Instruct-v0.10/raw_2024-08-13T22-35-47.080828/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9162158773450131, - "acc,all": 0.9162581699346405, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7289740656496875, - "mse,all": 0.8603921568627452, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5841446453407511, - "acc,exam_id__UNICAMP_2024": 0.6888888888888889, - "acc,exam_id__USP_2022": 0.5510204081632653, - "acc,exam_id__USP_2021": 0.5769230769230769, - "acc,exam_id__USP_2018": 0.42592592592592593, - "acc,exam_id__USP_2024": 0.7073170731707317, - "acc,exam_id__UNICAMP_2018": 0.42592592592592593, - "acc,exam_id__UNICAMP_2021_1": 0.6086956521739131, - "acc,exam_id__UNICAMP_2022": 0.6923076923076923, - "acc,exam_id__UNICAMP_2021_2": 0.5882352941176471, - "acc,exam_id__UNICAMP_2023": 0.627906976744186, - "acc,exam_id__USP_2023": 0.6818181818181818, - "acc,exam_id__USP_2019": 0.55, - "acc,exam_id__UNICAMP_2019": 0.62, - "acc,exam_id__USP_2020": 0.5535714285714286, - "acc,exam_id__UNICAMP_2020": 0.5636363636363636, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.7137858642407278, - "acc,exam_id__2011": 0.7521367521367521, - 
"acc,exam_id__2016": 0.6942148760330579, - "acc,exam_id__2016_2": 0.6585365853658537, - "acc,exam_id__2023": 0.7703703703703704, - "acc,exam_id__2010": 0.717948717948718, - "acc,exam_id__2012": 0.7327586206896551, - "acc,exam_id__2022": 0.6616541353383458, - "acc,exam_id__2014": 0.6972477064220184, - "acc,exam_id__2017": 0.6982758620689655, - "acc,exam_id__2009": 0.7478260869565218, - "acc,exam_id__2015": 0.7310924369747899, - "acc,exam_id__2013": 0.7037037037037037 - }, - "faquad_nli": { - "f1_macro,all": 0.7511456136837518, - "acc,all": 0.8, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8618806417693999, - "acc,all": 0.8621428571428571 - }, - "oab_exams": { - "acc,all": 0.5029612756264237, - "acc,exam_id__2011-03": 0.45454545454545453, - "acc,exam_id__2016-19": 0.5128205128205128, - "acc,exam_id__2012-06a": 0.5375, - "acc,exam_id__2014-15": 0.5769230769230769, - "acc,exam_id__2017-22": 0.5625, - "acc,exam_id__2018-25": 0.5125, - "acc,exam_id__2016-20": 0.55, - "acc,exam_id__2012-06": 0.5375, - "acc,exam_id__2017-24": 0.4875, - "acc,exam_id__2015-18": 0.55, - "acc,exam_id__2016-20a": 0.4375, - "acc,exam_id__2012-07": 0.4875, - "acc,exam_id__2016-21": 0.3875, - "acc,exam_id__2010-01": 0.4, - "acc,exam_id__2011-05": 0.4375, - "acc,exam_id__2013-11": 0.5, - "acc,exam_id__2013-12": 0.5625, - "acc,exam_id__2014-14": 0.625, - "acc,exam_id__2012-09": 0.5064935064935064, - "acc,exam_id__2014-13": 0.425, - "acc,exam_id__2013-10": 0.45, - "acc,exam_id__2011-04": 0.475, - "acc,exam_id__2015-16": 0.4625, - "acc,exam_id__2017-23": 0.5, - "acc,exam_id__2010-02": 0.54, - "acc,exam_id__2012-08": 0.5, - "acc,exam_id__2015-17": 0.6153846153846154, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6285785998647415, - "acc,all": 0.6321974148061105 - }, - "tweetsentbr": { - "f1_macro,all": 0.5015019648574024, - "acc,all": 0.7189054726368159, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9162158773450131, + "acc,all": 0.9162581699346405, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7289740656496875, + "mse,all": 0.8603921568627452, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5841446453407511, + "acc,exam_id__UNICAMP_2024": 0.6888888888888889, + "acc,exam_id__USP_2022": 0.5510204081632653, + "acc,exam_id__USP_2021": 0.5769230769230769, + "acc,exam_id__USP_2018": 0.42592592592592593, + "acc,exam_id__USP_2024": 0.7073170731707317, + "acc,exam_id__UNICAMP_2018": 0.42592592592592593, + "acc,exam_id__UNICAMP_2021_1": 0.6086956521739131, + "acc,exam_id__UNICAMP_2022": 0.6923076923076923, + "acc,exam_id__UNICAMP_2021_2": 0.5882352941176471, + "acc,exam_id__UNICAMP_2023": 0.627906976744186, + "acc,exam_id__USP_2023": 0.6818181818181818, + "acc,exam_id__USP_2019": 0.55, + "acc,exam_id__UNICAMP_2019": 0.62, + "acc,exam_id__USP_2020": 0.5535714285714286, + "acc,exam_id__UNICAMP_2020": 0.5636363636363636, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.7137858642407278, + "acc,exam_id__2011": 0.7521367521367521, + "acc,exam_id__2016": 0.6942148760330579, + "acc,exam_id__2016_2": 0.6585365853658537, + "acc,exam_id__2023": 0.7703703703703704, + "acc,exam_id__2010": 0.717948717948718, + "acc,exam_id__2012": 0.7327586206896551, + "acc,exam_id__2022": 0.6616541353383458, + "acc,exam_id__2014": 0.6972477064220184, + "acc,exam_id__2017": 0.6982758620689655, + "acc,exam_id__2009": 0.7478260869565218, + "acc,exam_id__2015": 0.7310924369747899, + "acc,exam_id__2013": 0.7037037037037037 + }, + "faquad_nli": { + "f1_macro,all": 0.7511456136837518, + "acc,all": 0.8, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8618806417693999, + "acc,all": 0.8621428571428571 + }, + "oab_exams": { + "acc,all": 0.5029612756264237, + "acc,exam_id__2011-03": 0.45454545454545453, + "acc,exam_id__2016-19": 0.5128205128205128, + "acc,exam_id__2012-06a": 0.5375, + "acc,exam_id__2014-15": 0.5769230769230769, + "acc,exam_id__2017-22": 0.5625, + "acc,exam_id__2018-25": 0.5125, + "acc,exam_id__2016-20": 0.55, + "acc,exam_id__2012-06": 0.5375, + "acc,exam_id__2017-24": 0.4875, + "acc,exam_id__2015-18": 0.55, + "acc,exam_id__2016-20a": 0.4375, + "acc,exam_id__2012-07": 0.4875, + "acc,exam_id__2016-21": 0.3875, + "acc,exam_id__2010-01": 0.4, + "acc,exam_id__2011-05": 0.4375, + "acc,exam_id__2013-11": 0.5, + "acc,exam_id__2013-12": 0.5625, + "acc,exam_id__2014-14": 0.625, + "acc,exam_id__2012-09": 0.5064935064935064, + "acc,exam_id__2014-13": 0.425, + "acc,exam_id__2013-10": 0.45, + "acc,exam_id__2011-04": 0.475, 
+ "acc,exam_id__2015-16": 0.4625, + "acc,exam_id__2017-23": 0.5, + "acc,exam_id__2010-02": 0.54, + "acc,exam_id__2012-08": 0.5, + "acc,exam_id__2015-17": 0.6153846153846154, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6285785998647415, + "acc,all": 0.6321974148061105 + }, + "tweetsentbr": { + "f1_macro,all": 0.6686692864765366, + "acc,all": 0.7189054726368159, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "55a6fc03e04f1a68a5e2df16f3d0485d9ea357c8", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 16060530944, - "model_num_parameters": 8030261248, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1318.5322712418301, - "min_seq_length": 1299, - "max_seq_length": 1382, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1509.5322712418301, - "min_seq_length": 1490, - "max_seq_length": 1573, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1484.7719054242002, - "min_seq_length": 1165, - "max_seq_length": 2134, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1412.3547935619315, - "min_seq_length": 1187, - "max_seq_length": 2340, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1447.8215384615385, - "min_seq_length": 1402, - "max_seq_length": 1544, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "55a6fc03e04f1a68a5e2df16f3d0485d9ea357c8", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 16060530944, + "model_num_parameters": 8030261248, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1279.3878571428572, - "min_seq_length": 1259, - "max_seq_length": 1498, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1220.3772209567198, - "min_seq_length": 988, - "max_seq_length": 1654, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1318.5322712418301, + "min_seq_length": 1299, + "max_seq_length": 1382, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1509.5322712418301, + "min_seq_length": 1490, + "max_seq_length": 1573, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1484.7719054242002, + "min_seq_length": 1165, + "max_seq_length": 2134, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1412.3547935619315, + "min_seq_length": 1187, + "max_seq_length": 2340, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1447.8215384615385, + "min_seq_length": 1402, + "max_seq_length": 1544, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1279.3878571428572, + "min_seq_length": 1259, + "max_seq_length": 1498, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1220.3772209567198, + "min_seq_length": 988, + "max_seq_length": 1654, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1676.4195064629848, + "min_seq_length": 1646, + "max_seq_length": 1708, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1537.1537313432837, + "min_seq_length": 1520, + "max_seq_length": 1585, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1676.4195064629848, - "min_seq_length": 1646, - "max_seq_length": 1708, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=MaziyarPanahi/Llama-3-8B-Instruct-v0.10,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1537.1537313432837, - "min_seq_length": 1520, - "max_seq_length": 1585, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=MaziyarPanahi/Llama-3-8B-Instruct-v0.10,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": 
null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/MaziyarPanahi/Llama-3-8B-Instruct-v0.10/results_2024-08-13T22-35-47.080828.json b/MaziyarPanahi/Llama-3-8B-Instruct-v0.10/results_2024-08-13T22-35-47.080828.json index 6247dd342b2c2a20cb0d50d7babbfb5b73a6f9ca..118ebecbfd0e49413c26fe21aeb0bfadac568263 100644 --- a/MaziyarPanahi/Llama-3-8B-Instruct-v0.10/results_2024-08-13T22-35-47.080828.json +++ b/MaziyarPanahi/Llama-3-8B-Instruct-v0.10/results_2024-08-13T22-35-47.080828.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6876876164864331, - "all_grouped_npm": 0.5351030169272294, + "all_grouped_average": 0.7062617633330035, + "all_grouped_npm": 0.5627431164012927, "all_grouped": { "enem_challenge": 0.7137858642407278, "bluex": 0.5841446453407511, @@ -45,7 +45,7 @@ "faquad_nli": 0.7511456136837518, "hatebr_offensive": 0.8618806417693999, "portuguese_hate_speech": 0.6285785998647415, - "tweetsentbr": 0.5015019648574024 + "tweetsentbr": 0.6686692864765366 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.7137858642407278, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7511456136837518, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8618806417693999, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6285785998647415, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5015019648574024 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6686692864765366 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.7137858642407278, @@ -150,9 +150,9 @@ "main_score": 0.6285785998647415 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5015019648574024, + "f1_macro,all": 0.6686692864765366, "acc,all": 0.7189054726368159, - "main_score": 0.5015019648574024 + "main_score": 0.6686692864765366 } }, "config_tasks": { diff --git a/MaziyarPanahi/Llama-3-8B-Instruct-v0.8/raw_2024-06-12T10-17-20.117216/results.json b/MaziyarPanahi/Llama-3-8B-Instruct-v0.8/raw_2024-06-12T10-17-20.117216/results.json index 109a228bff3aea4ffade8952118bada29b1859cc..85b45c22e893cd5906b42714e479d2f08fc244fd 100644 --- a/MaziyarPanahi/Llama-3-8B-Instruct-v0.8/raw_2024-06-12T10-17-20.117216/results.json +++ b/MaziyarPanahi/Llama-3-8B-Instruct-v0.8/raw_2024-06-12T10-17-20.117216/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9182337965638154, - "acc,all": 0.9183006535947712, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7259213081034068, - "mse,all": 0.7093504901960784, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5924895688456189, - "acc,exam_id__USP_2023": 0.7045454545454546, - "acc,exam_id__USP_2021": 0.5961538461538461, - "acc,exam_id__USP_2024": 0.6829268292682927, - "acc,exam_id__USP_2020": 0.5714285714285714, - "acc,exam_id__UNICAMP_2024": 0.6666666666666666, - "acc,exam_id__USP_2022": 0.5510204081632653, - "acc,exam_id__UNICAMP_2019": 0.6, - "acc,exam_id__UNICAMP_2023": 0.627906976744186, - "acc,exam_id__USP_2018": 0.42592592592592593, - "acc,exam_id__UNICAMP_2021_2": 0.5882352941176471, - "acc,exam_id__UNICAMP_2021_1": 0.6521739130434783, - "acc,exam_id__UNICAMP_2018": 0.4444444444444444, - "acc,exam_id__UNICAMP_2020": 0.5454545454545454, - "acc,exam_id__USP_2019": 0.65, - "acc,exam_id__UNICAMP_2022": 0.6923076923076923, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.71518544436669, - "acc,exam_id__2014": 0.7339449541284404, - "acc,exam_id__2009": 0.7478260869565218, 
- "acc,exam_id__2017": 0.6896551724137931, - "acc,exam_id__2016": 0.7107438016528925, - "acc,exam_id__2012": 0.7155172413793104, - "acc,exam_id__2013": 0.6944444444444444, - "acc,exam_id__2015": 0.7310924369747899, - "acc,exam_id__2023": 0.762962962962963, - "acc,exam_id__2022": 0.6616541353383458, - "acc,exam_id__2010": 0.7264957264957265, - "acc,exam_id__2016_2": 0.6666666666666666, - "acc,exam_id__2011": 0.7435897435897436 - }, - "faquad_nli": { - "f1_macro,all": 0.7473170846635018, - "acc,all": 0.796923076923077, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8684640522875817, - "acc,all": 0.8685714285714285 - }, - "oab_exams": { - "acc,all": 0.5120728929384966, - "acc,exam_id__2013-11": 0.525, - "acc,exam_id__2011-05": 0.475, - "acc,exam_id__2016-20a": 0.4375, - "acc,exam_id__2018-25": 0.5375, - "acc,exam_id__2014-14": 0.625, - "acc,exam_id__2014-13": 0.425, - "acc,exam_id__2017-23": 0.5, - "acc,exam_id__2010-02": 0.56, - "acc,exam_id__2015-17": 0.6282051282051282, - "acc,exam_id__2012-06a": 0.575, - "acc,exam_id__2012-09": 0.5064935064935064, - "acc,exam_id__2016-20": 0.575, - "acc,exam_id__2012-07": 0.475, - "acc,exam_id__2012-08": 0.5125, - "acc,exam_id__2013-10": 0.4625, - "acc,exam_id__2015-16": 0.475, - "acc,exam_id__2017-22": 0.575, - "acc,exam_id__2012-06": 0.525, - "acc,exam_id__2011-03": 0.46464646464646464, - "acc,exam_id__2017-24": 0.4875, - "acc,exam_id__2014-15": 0.5897435897435898, - "acc,exam_id__2015-18": 0.525, - "acc,exam_id__2016-19": 0.5384615384615384, - "acc,exam_id__2013-12": 0.575, - "acc,exam_id__2011-04": 0.5125, - "acc,exam_id__2016-21": 0.3875, - "acc,exam_id__2010-01": 0.36470588235294116, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6115918023075952, - "acc,all": 0.6133960047003525 - }, - "tweetsentbr": { - "f1_macro,all": 0.5050471309710864, - "acc,all": 0.7238805970149254, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9182337965638154, + "acc,all": 0.9183006535947712, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7259213081034068, + "mse,all": 0.7093504901960784, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5924895688456189, + "acc,exam_id__USP_2023": 0.7045454545454546, + "acc,exam_id__USP_2021": 0.5961538461538461, + "acc,exam_id__USP_2024": 0.6829268292682927, + "acc,exam_id__USP_2020": 0.5714285714285714, + "acc,exam_id__UNICAMP_2024": 0.6666666666666666, + "acc,exam_id__USP_2022": 0.5510204081632653, + "acc,exam_id__UNICAMP_2019": 0.6, + "acc,exam_id__UNICAMP_2023": 0.627906976744186, + "acc,exam_id__USP_2018": 0.42592592592592593, + "acc,exam_id__UNICAMP_2021_2": 0.5882352941176471, + "acc,exam_id__UNICAMP_2021_1": 0.6521739130434783, + "acc,exam_id__UNICAMP_2018": 0.4444444444444444, + "acc,exam_id__UNICAMP_2020": 0.5454545454545454, + "acc,exam_id__USP_2019": 0.65, + "acc,exam_id__UNICAMP_2022": 0.6923076923076923, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.71518544436669, + "acc,exam_id__2014": 0.7339449541284404, + "acc,exam_id__2009": 0.7478260869565218, + "acc,exam_id__2017": 0.6896551724137931, + "acc,exam_id__2016": 0.7107438016528925, + "acc,exam_id__2012": 0.7155172413793104, + "acc,exam_id__2013": 0.6944444444444444, + "acc,exam_id__2015": 0.7310924369747899, + "acc,exam_id__2023": 0.762962962962963, + "acc,exam_id__2022": 0.6616541353383458, + "acc,exam_id__2010": 0.7264957264957265, + "acc,exam_id__2016_2": 0.6666666666666666, + "acc,exam_id__2011": 0.7435897435897436 + }, + "faquad_nli": { + "f1_macro,all": 0.7473170846635018, + "acc,all": 0.796923076923077, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8684640522875817, + "acc,all": 0.8685714285714285 + }, + "oab_exams": { + "acc,all": 0.5120728929384966, + "acc,exam_id__2013-11": 0.525, + "acc,exam_id__2011-05": 0.475, + "acc,exam_id__2016-20a": 0.4375, + "acc,exam_id__2018-25": 0.5375, + "acc,exam_id__2014-14": 0.625, + "acc,exam_id__2014-13": 0.425, + "acc,exam_id__2017-23": 0.5, + "acc,exam_id__2010-02": 0.56, + "acc,exam_id__2015-17": 0.6282051282051282, + "acc,exam_id__2012-06a": 0.575, + "acc,exam_id__2012-09": 0.5064935064935064, + "acc,exam_id__2016-20": 0.575, + "acc,exam_id__2012-07": 0.475, + "acc,exam_id__2012-08": 0.5125, + "acc,exam_id__2013-10": 0.4625, + "acc,exam_id__2015-16": 0.475, + "acc,exam_id__2017-22": 0.575, + "acc,exam_id__2012-06": 0.525, + "acc,exam_id__2011-03": 0.46464646464646464, + "acc,exam_id__2017-24": 0.4875, + "acc,exam_id__2014-15": 0.5897435897435898, + 
"acc,exam_id__2015-18": 0.525, + "acc,exam_id__2016-19": 0.5384615384615384, + "acc,exam_id__2013-12": 0.575, + "acc,exam_id__2011-04": 0.5125, + "acc,exam_id__2016-21": 0.3875, + "acc,exam_id__2010-01": 0.36470588235294116, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6115918023075952, + "acc,all": 0.6133960047003525 + }, + "tweetsentbr": { + "f1_macro,all": 0.6733961746281153, + "acc,all": 0.7238805970149254, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "94d222b8447b600b9836da4036df9490b59fe966", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 16194748416, - "model_num_parameters": 8030261248, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 4, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1318.5322712418301, - "min_seq_length": 1299, - "max_seq_length": 1382, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1509.5322712418301, - "min_seq_length": 1490, - "max_seq_length": 1573, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1484.7719054242002, - "min_seq_length": 1165, - "max_seq_length": 2134, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1412.3547935619315, - "min_seq_length": 1187, - "max_seq_length": 2340, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1447.8215384615385, - "min_seq_length": 1402, - "max_seq_length": 1544, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "94d222b8447b600b9836da4036df9490b59fe966", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 16194748416, + "model_num_parameters": 8030261248, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 4, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1279.3878571428572, - "min_seq_length": 1259, - "max_seq_length": 1498, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1220.3772209567198, - "min_seq_length": 988, - "max_seq_length": 1654, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1318.5322712418301, + "min_seq_length": 1299, + "max_seq_length": 1382, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1509.5322712418301, + "min_seq_length": 1490, + "max_seq_length": 1573, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1484.7719054242002, + "min_seq_length": 1165, + "max_seq_length": 2134, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1412.3547935619315, + "min_seq_length": 1187, + "max_seq_length": 2340, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1447.8215384615385, + "min_seq_length": 1402, + "max_seq_length": 1544, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1279.3878571428572, + "min_seq_length": 1259, + "max_seq_length": 1498, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1220.3772209567198, + "min_seq_length": 988, + "max_seq_length": 1654, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1676.4195064629848, + "min_seq_length": 1646, + "max_seq_length": 1708, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1537.1537313432837, + "min_seq_length": 1520, + "max_seq_length": 1585, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1676.4195064629848, - "min_seq_length": 1646, - "max_seq_length": 1708, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=MaziyarPanahi/Llama-3-8B-Instruct-v0.8,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1537.1537313432837, - "min_seq_length": 1520, - "max_seq_length": 1585, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=MaziyarPanahi/Llama-3-8B-Instruct-v0.8,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": 
null - }, - "git_hash": "f2a0116" + "git_hash": "f2a0116" } \ No newline at end of file diff --git a/MaziyarPanahi/Llama-3-8B-Instruct-v0.8/results_2024-06-12T10-17-20.117216.json b/MaziyarPanahi/Llama-3-8B-Instruct-v0.8/results_2024-06-12T10-17-20.117216.json index 9af20fe53d7a816c1c1f9d26c7c1973665d776c0..0f9e71d337b794bce3941492e71c68af196a186c 100644 --- a/MaziyarPanahi/Llama-3-8B-Instruct-v0.8/results_2024-06-12T10-17-20.117216.json +++ b/MaziyarPanahi/Llama-3-8B-Instruct-v0.8/results_2024-06-12T10-17-20.117216.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6884803423386436, - "all_grouped_npm": 0.5355973983808373, + "all_grouped_average": 0.7071857916338691, + "all_grouped_npm": 0.563432888403494, "all_grouped": { "enem_challenge": 0.71518544436669, "bluex": 0.5924895688456189, @@ -45,7 +45,7 @@ "faquad_nli": 0.7473170846635018, "hatebr_offensive": 0.8684640522875817, "portuguese_hate_speech": 0.6115918023075952, - "tweetsentbr": 0.5050471309710864 + "tweetsentbr": 0.6733961746281153 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.71518544436669, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7473170846635018, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8684640522875817, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6115918023075952, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5050471309710864 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6733961746281153 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.71518544436669, @@ -150,9 +150,9 @@ "main_score": 0.6115918023075952 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5050471309710864, + "f1_macro,all": 0.6733961746281153, "acc,all": 0.7238805970149254, - "main_score": 0.5050471309710864 + "main_score": 0.6733961746281153 } }, "config_tasks": { diff --git a/MaziyarPanahi/Llama-3-8B-Instruct-v0.9/raw_2024-08-13T23-42-30.931238/results.json b/MaziyarPanahi/Llama-3-8B-Instruct-v0.9/raw_2024-08-13T23-42-30.931238/results.json index 0a8770458a09bb099c06fef6bbac2f3fc90c07bb..16b770e0c11aa05e39ce3a51f998aa84c7489cad 100644 --- a/MaziyarPanahi/Llama-3-8B-Instruct-v0.9/raw_2024-08-13T23-42-30.931238/results.json +++ b/MaziyarPanahi/Llama-3-8B-Instruct-v0.9/raw_2024-08-13T23-42-30.931238/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9178565043038492, - "acc,all": 0.9178921568627451, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7263342614908861, - "mse,all": 0.8304207516339871, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5827538247566064, - "acc,exam_id__UNICAMP_2024": 0.6444444444444445, - "acc,exam_id__USP_2022": 0.5306122448979592, - "acc,exam_id__USP_2021": 0.5769230769230769, - "acc,exam_id__USP_2018": 0.4444444444444444, - "acc,exam_id__USP_2024": 0.6829268292682927, - "acc,exam_id__UNICAMP_2018": 0.4444444444444444, - "acc,exam_id__UNICAMP_2021_1": 0.5869565217391305, - "acc,exam_id__UNICAMP_2022": 0.6923076923076923, - "acc,exam_id__UNICAMP_2021_2": 0.5882352941176471, - "acc,exam_id__UNICAMP_2023": 0.627906976744186, - "acc,exam_id__USP_2023": 0.7045454545454546, - "acc,exam_id__USP_2019": 0.575, - "acc,exam_id__UNICAMP_2019": 0.6, - "acc,exam_id__USP_2020": 0.5714285714285714, - "acc,exam_id__UNICAMP_2020": 0.5636363636363636, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.7116864940517844, - "acc,exam_id__2011": 0.7435897435897436, - "acc,exam_id__2016": 0.7024793388429752, - 
"acc,exam_id__2016_2": 0.6666666666666666, - "acc,exam_id__2023": 0.7703703703703704, - "acc,exam_id__2010": 0.7008547008547008, - "acc,exam_id__2012": 0.7241379310344828, - "acc,exam_id__2022": 0.6616541353383458, - "acc,exam_id__2014": 0.7155963302752294, - "acc,exam_id__2017": 0.6896551724137931, - "acc,exam_id__2009": 0.7652173913043478, - "acc,exam_id__2015": 0.7226890756302521, - "acc,exam_id__2013": 0.6759259259259259 - }, - "faquad_nli": { - "f1_macro,all": 0.7687727167272447, - "acc,all": 0.823076923076923, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8648344221671547, - "acc,all": 0.865 - }, - "oab_exams": { - "acc,all": 0.5056947608200456, - "acc,exam_id__2011-03": 0.46464646464646464, - "acc,exam_id__2016-19": 0.5384615384615384, - "acc,exam_id__2012-06a": 0.5375, - "acc,exam_id__2014-15": 0.5641025641025641, - "acc,exam_id__2017-22": 0.575, - "acc,exam_id__2018-25": 0.525, - "acc,exam_id__2016-20": 0.5375, - "acc,exam_id__2012-06": 0.525, - "acc,exam_id__2017-24": 0.475, - "acc,exam_id__2015-18": 0.5375, - "acc,exam_id__2016-20a": 0.45, - "acc,exam_id__2012-07": 0.4875, - "acc,exam_id__2016-21": 0.4125, - "acc,exam_id__2010-01": 0.36470588235294116, - "acc,exam_id__2011-05": 0.4625, - "acc,exam_id__2013-11": 0.5125, - "acc,exam_id__2013-12": 0.55, - "acc,exam_id__2014-14": 0.6, - "acc,exam_id__2012-09": 0.5064935064935064, - "acc,exam_id__2014-13": 0.425, - "acc,exam_id__2013-10": 0.4625, - "acc,exam_id__2011-04": 0.5, - "acc,exam_id__2015-16": 0.4625, - "acc,exam_id__2017-23": 0.5, - "acc,exam_id__2010-02": 0.56, - "acc,exam_id__2012-08": 0.5, - "acc,exam_id__2015-17": 0.6282051282051282, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6191176470588236, - "acc,all": 0.6216216216216216 - }, - "tweetsentbr": { - "f1_macro,all": 0.5003023525691382, - "acc,all": 0.7179104477611941, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9178565043038492, + "acc,all": 0.9178921568627451, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7263342614908861, + "mse,all": 0.8304207516339871, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5827538247566064, + "acc,exam_id__UNICAMP_2024": 0.6444444444444445, + "acc,exam_id__USP_2022": 0.5306122448979592, + "acc,exam_id__USP_2021": 0.5769230769230769, + "acc,exam_id__USP_2018": 0.4444444444444444, + "acc,exam_id__USP_2024": 0.6829268292682927, + "acc,exam_id__UNICAMP_2018": 0.4444444444444444, + "acc,exam_id__UNICAMP_2021_1": 0.5869565217391305, + "acc,exam_id__UNICAMP_2022": 0.6923076923076923, + "acc,exam_id__UNICAMP_2021_2": 0.5882352941176471, + "acc,exam_id__UNICAMP_2023": 0.627906976744186, + "acc,exam_id__USP_2023": 0.7045454545454546, + "acc,exam_id__USP_2019": 0.575, + "acc,exam_id__UNICAMP_2019": 0.6, + "acc,exam_id__USP_2020": 0.5714285714285714, + "acc,exam_id__UNICAMP_2020": 0.5636363636363636, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.7116864940517844, + "acc,exam_id__2011": 0.7435897435897436, + "acc,exam_id__2016": 0.7024793388429752, + "acc,exam_id__2016_2": 0.6666666666666666, + "acc,exam_id__2023": 0.7703703703703704, + "acc,exam_id__2010": 0.7008547008547008, + "acc,exam_id__2012": 0.7241379310344828, + "acc,exam_id__2022": 0.6616541353383458, + "acc,exam_id__2014": 0.7155963302752294, + "acc,exam_id__2017": 0.6896551724137931, + "acc,exam_id__2009": 0.7652173913043478, + "acc,exam_id__2015": 0.7226890756302521, + "acc,exam_id__2013": 0.6759259259259259 + }, + "faquad_nli": { + "f1_macro,all": 0.7687727167272447, + "acc,all": 0.823076923076923, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8648344221671547, + "acc,all": 0.865 + }, + "oab_exams": { + "acc,all": 0.5056947608200456, + "acc,exam_id__2011-03": 0.46464646464646464, + "acc,exam_id__2016-19": 0.5384615384615384, + "acc,exam_id__2012-06a": 0.5375, + "acc,exam_id__2014-15": 0.5641025641025641, + "acc,exam_id__2017-22": 0.575, + "acc,exam_id__2018-25": 0.525, + "acc,exam_id__2016-20": 0.5375, + "acc,exam_id__2012-06": 0.525, + "acc,exam_id__2017-24": 0.475, + "acc,exam_id__2015-18": 0.5375, + "acc,exam_id__2016-20a": 0.45, + "acc,exam_id__2012-07": 0.4875, + "acc,exam_id__2016-21": 0.4125, + "acc,exam_id__2010-01": 0.36470588235294116, + "acc,exam_id__2011-05": 0.4625, + "acc,exam_id__2013-11": 0.5125, + "acc,exam_id__2013-12": 0.55, + "acc,exam_id__2014-14": 0.6, + "acc,exam_id__2012-09": 0.5064935064935064, + "acc,exam_id__2014-13": 0.425, + "acc,exam_id__2013-10": 0.4625, + 
"acc,exam_id__2011-04": 0.5, + "acc,exam_id__2015-16": 0.4625, + "acc,exam_id__2017-23": 0.5, + "acc,exam_id__2010-02": 0.56, + "acc,exam_id__2012-08": 0.5, + "acc,exam_id__2015-17": 0.6282051282051282, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6191176470588236, + "acc,all": 0.6216216216216216 + }, + "tweetsentbr": { + "f1_macro,all": 0.6670698034255177, + "acc,all": 0.7179104477611941, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "ddf91fdc0a3ab5e5d76864f1c4cf44e5adacd565", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 16060530944, - "model_num_parameters": 8030261248, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1318.5322712418301, - "min_seq_length": 1299, - "max_seq_length": 1382, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1509.5322712418301, - "min_seq_length": 1490, - "max_seq_length": 1573, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1484.7719054242002, - "min_seq_length": 1165, - "max_seq_length": 2134, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1412.3547935619315, - "min_seq_length": 1187, - "max_seq_length": 2340, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1447.8215384615385, - "min_seq_length": 1402, - "max_seq_length": 1544, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "ddf91fdc0a3ab5e5d76864f1c4cf44e5adacd565", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 16060530944, + "model_num_parameters": 8030261248, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1279.3878571428572, - "min_seq_length": 1259, - "max_seq_length": 1498, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1220.3772209567198, - "min_seq_length": 988, - "max_seq_length": 1654, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1318.5322712418301, + "min_seq_length": 1299, + "max_seq_length": 1382, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1509.5322712418301, + "min_seq_length": 1490, + "max_seq_length": 1573, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1484.7719054242002, + "min_seq_length": 1165, + "max_seq_length": 2134, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1412.3547935619315, + "min_seq_length": 1187, + "max_seq_length": 2340, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1447.8215384615385, + "min_seq_length": 1402, + "max_seq_length": 1544, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1279.3878571428572, + "min_seq_length": 1259, + "max_seq_length": 1498, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1220.3772209567198, + "min_seq_length": 988, + "max_seq_length": 1654, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1676.4195064629848, + "min_seq_length": 1646, + "max_seq_length": 1708, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1537.1537313432837, + "min_seq_length": 1520, + "max_seq_length": 1585, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1676.4195064629848, - "min_seq_length": 1646, - "max_seq_length": 1708, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=MaziyarPanahi/Llama-3-8B-Instruct-v0.9,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1537.1537313432837, - "min_seq_length": 1520, - "max_seq_length": 1585, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=MaziyarPanahi/Llama-3-8B-Instruct-v0.9,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": 
null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/MaziyarPanahi/Llama-3-8B-Instruct-v0.9/results_2024-08-13T23-42-30.931238.json b/MaziyarPanahi/Llama-3-8B-Instruct-v0.9/results_2024-08-13T23-42-30.931238.json index 1eddd19f55df1458ad232fa1a821359242a6f39b..34542d804f6b600932f834c2401c1844ecef6f11 100644 --- a/MaziyarPanahi/Llama-3-8B-Instruct-v0.9/results_2024-08-13T23-42-30.931238.json +++ b/MaziyarPanahi/Llama-3-8B-Instruct-v0.9/results_2024-08-13T23-42-30.931238.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.688594775993948, - "all_grouped_npm": 0.5371289330065729, + "all_grouped_average": 0.707124492755768, + "all_grouped_npm": 0.5647029162830907, "all_grouped": { "enem_challenge": 0.7116864940517844, "bluex": 0.5827538247566064, @@ -45,7 +45,7 @@ "faquad_nli": 0.7687727167272447, "hatebr_offensive": 0.8648344221671547, "portuguese_hate_speech": 0.6191176470588236, - "tweetsentbr": 0.5003023525691382 + "tweetsentbr": 0.6670698034255177 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.7116864940517844, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7687727167272447, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8648344221671547, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6191176470588236, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5003023525691382 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6670698034255177 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.7116864940517844, @@ -150,9 +150,9 @@ "main_score": 0.6191176470588236 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5003023525691382, + "f1_macro,all": 0.6670698034255177, "acc,all": 0.7179104477611941, - "main_score": 0.5003023525691382 + "main_score": 0.6670698034255177 } }, "config_tasks": { diff --git a/MaziyarPanahi/Mistral-7B-Instruct-Aya-101/raw_2024-04-17T09-07-30.140283/results.json b/MaziyarPanahi/Mistral-7B-Instruct-Aya-101/raw_2024-04-17T09-07-30.140283/results.json index f64ba675faba82352a3837dd5c18e25257a758c8..c5d8763f2bc1e79698b10a19f5d6c93217ce695d 100644 --- a/MaziyarPanahi/Mistral-7B-Instruct-Aya-101/raw_2024-04-17T09-07-30.140283/results.json +++ b/MaziyarPanahi/Mistral-7B-Instruct-Aya-101/raw_2024-04-17T09-07-30.140283/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9072398971802695, - "acc,all": 0.9072712418300654, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7641692139433879, - "mse,all": 0.6519485294117647, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5438108484005564, - "acc,exam_id__USP_2022": 0.4489795918367347, - "acc,exam_id__USP_2019": 0.5, - "acc,exam_id__USP_2018": 0.46296296296296297, - "acc,exam_id__USP_2024": 0.7073170731707317, - "acc,exam_id__UNICAMP_2018": 0.4444444444444444, - "acc,exam_id__UNICAMP_2019": 0.64, - "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, - "acc,exam_id__UNICAMP_2024": 0.5333333333333333, - "acc,exam_id__USP_2021": 0.5384615384615384, - "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, - "acc,exam_id__UNICAMP_2022": 0.5128205128205128, - "acc,exam_id__UNICAMP_2023": 0.6046511627906976, - "acc,exam_id__UNICAMP_2020": 0.5636363636363636, - "acc,exam_id__USP_2020": 0.5357142857142857, - "acc,exam_id__USP_2023": 0.6363636363636364, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6060181945416375, - "acc,exam_id__2016": 0.5867768595041323, - "acc,exam_id__2011": 
0.6666666666666666, - "acc,exam_id__2010": 0.5897435897435898, - "acc,exam_id__2017": 0.5517241379310345, - "acc,exam_id__2023": 0.6296296296296297, - "acc,exam_id__2015": 0.6134453781512605, - "acc,exam_id__2012": 0.5775862068965517, - "acc,exam_id__2013": 0.6018518518518519, - "acc,exam_id__2016_2": 0.6016260162601627, - "acc,exam_id__2014": 0.6330275229357798, - "acc,exam_id__2009": 0.6434782608695652, - "acc,exam_id__2022": 0.5789473684210527 - }, - "faquad_nli": { - "f1_macro,all": 0.6218181818181818, - "acc,all": 0.803076923076923, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8004209608305171, - "acc,all": 0.8057142857142857 - }, - "oab_exams": { - "acc,all": 0.39362186788154896, - "acc,exam_id__2012-07": 0.3875, - "acc,exam_id__2012-09": 0.36363636363636365, - "acc,exam_id__2017-22": 0.5, - "acc,exam_id__2011-03": 0.36363636363636365, - "acc,exam_id__2015-16": 0.4, - "acc,exam_id__2016-19": 0.4358974358974359, - "acc,exam_id__2016-20a": 0.2625, - "acc,exam_id__2018-25": 0.425, - "acc,exam_id__2011-05": 0.35, - "acc,exam_id__2010-02": 0.4, - "acc,exam_id__2017-24": 0.325, - "acc,exam_id__2015-18": 0.4375, - "acc,exam_id__2012-06": 0.4125, - "acc,exam_id__2013-12": 0.4, - "acc,exam_id__2014-14": 0.45, - "acc,exam_id__2014-13": 0.375, - "acc,exam_id__2017-23": 0.375, - "acc,exam_id__2010-01": 0.36470588235294116, - "acc,exam_id__2013-10": 0.3625, - "acc,exam_id__2016-20": 0.4125, - "acc,exam_id__2012-06a": 0.3875, - "acc,exam_id__2012-08": 0.4, - "acc,exam_id__2015-17": 0.5128205128205128, - "acc,exam_id__2016-21": 0.4125, - "acc,exam_id__2011-04": 0.3875, - "acc,exam_id__2014-15": 0.38461538461538464, - "acc,exam_id__2013-11": 0.35, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6762940852684385, - "acc,all": 0.700352526439483 - }, - "tweetsentbr": { - "f1_macro,all": 0.5030635127570277, - "acc,all": 0.7034825870646766, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9072398971802695, + "acc,all": 0.9072712418300654, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7641692139433879, + "mse,all": 0.6519485294117647, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5438108484005564, + "acc,exam_id__USP_2022": 0.4489795918367347, + "acc,exam_id__USP_2019": 0.5, + "acc,exam_id__USP_2018": 0.46296296296296297, + "acc,exam_id__USP_2024": 0.7073170731707317, + "acc,exam_id__UNICAMP_2018": 0.4444444444444444, + "acc,exam_id__UNICAMP_2019": 0.64, + "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, + "acc,exam_id__UNICAMP_2024": 0.5333333333333333, + "acc,exam_id__USP_2021": 0.5384615384615384, + "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, + "acc,exam_id__UNICAMP_2022": 0.5128205128205128, + "acc,exam_id__UNICAMP_2023": 0.6046511627906976, + "acc,exam_id__UNICAMP_2020": 0.5636363636363636, + "acc,exam_id__USP_2020": 0.5357142857142857, + "acc,exam_id__USP_2023": 0.6363636363636364, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6060181945416375, + "acc,exam_id__2016": 0.5867768595041323, + "acc,exam_id__2011": 0.6666666666666666, + "acc,exam_id__2010": 0.5897435897435898, + "acc,exam_id__2017": 0.5517241379310345, + "acc,exam_id__2023": 0.6296296296296297, + "acc,exam_id__2015": 0.6134453781512605, + "acc,exam_id__2012": 0.5775862068965517, + "acc,exam_id__2013": 0.6018518518518519, + "acc,exam_id__2016_2": 0.6016260162601627, + "acc,exam_id__2014": 0.6330275229357798, + "acc,exam_id__2009": 0.6434782608695652, + "acc,exam_id__2022": 0.5789473684210527 + }, + "faquad_nli": { + "f1_macro,all": 0.6218181818181818, + "acc,all": 0.803076923076923, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8004209608305171, + "acc,all": 0.8057142857142857 + }, + "oab_exams": { + "acc,all": 0.39362186788154896, + "acc,exam_id__2012-07": 0.3875, + "acc,exam_id__2012-09": 0.36363636363636365, + "acc,exam_id__2017-22": 0.5, + "acc,exam_id__2011-03": 0.36363636363636365, + "acc,exam_id__2015-16": 0.4, + "acc,exam_id__2016-19": 0.4358974358974359, + "acc,exam_id__2016-20a": 0.2625, + "acc,exam_id__2018-25": 0.425, + "acc,exam_id__2011-05": 0.35, + "acc,exam_id__2010-02": 0.4, + "acc,exam_id__2017-24": 0.325, + "acc,exam_id__2015-18": 0.4375, + "acc,exam_id__2012-06": 0.4125, + "acc,exam_id__2013-12": 0.4, + "acc,exam_id__2014-14": 0.45, + "acc,exam_id__2014-13": 0.375, + "acc,exam_id__2017-23": 0.375, + "acc,exam_id__2010-01": 0.36470588235294116, + "acc,exam_id__2013-10": 0.3625, + "acc,exam_id__2016-20": 0.4125, + "acc,exam_id__2012-06a": 0.3875, + 
"acc,exam_id__2012-08": 0.4, + "acc,exam_id__2015-17": 0.5128205128205128, + "acc,exam_id__2016-21": 0.4125, + "acc,exam_id__2011-04": 0.3875, + "acc,exam_id__2014-15": 0.38461538461538464, + "acc,exam_id__2013-11": 0.35, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6762940852684385, + "acc,all": 0.700352526439483 + }, + "tweetsentbr": { + "f1_macro,all": 0.6707513503427037, + "acc,all": 0.7034825870646766, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 3, - "non_truncated": 14147, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 3, - "has_chat_template": true, - "chat_type": "user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "7724e49d560d6b030e67aea0fe319020103929c0", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1451.7455065359477, - "min_seq_length": 1428, - "max_seq_length": 1518, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1675.7455065359477, - "min_seq_length": 1652, - "max_seq_length": 1742, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 1, - "non_truncated": 718, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 1, - "mean_seq_length": 1744.9262865090404, - "min_seq_length": 1368, - "max_seq_length": 2545, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998609179415855 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1645.039188243527, - "min_seq_length": 1379, - "max_seq_length": 2643, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1691.9876923076922, - "min_seq_length": 1636, - "max_seq_length": 1812, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 3, + "non_truncated": 14147, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 3, + "has_chat_template": true, + "chat_type": "user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "7724e49d560d6b030e67aea0fe319020103929c0", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1462.3878571428572, - "min_seq_length": 1439, - "max_seq_length": 1713, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1390.764464692483, - "min_seq_length": 1124, - "max_seq_length": 1893, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1451.7455065359477, + "min_seq_length": 1428, + "max_seq_length": 1518, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1675.7455065359477, + "min_seq_length": 1652, + "max_seq_length": 1742, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 1, + "non_truncated": 718, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 1, + "mean_seq_length": 1744.9262865090404, + "min_seq_length": 1368, + "max_seq_length": 2545, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998609179415855 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1645.039188243527, + "min_seq_length": 1379, + "max_seq_length": 2643, + 
"max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1691.9876923076922, + "min_seq_length": 1636, + "max_seq_length": 1812, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1462.3878571428572, + "min_seq_length": 1439, + "max_seq_length": 1713, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1390.764464692483, + "min_seq_length": 1124, + "max_seq_length": 1893, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1963.3360752056403, + "min_seq_length": 1928, + "max_seq_length": 2002, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1881.2492537313433, + "min_seq_length": 1860, + "max_seq_length": 1976, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1963.3360752056403, - "min_seq_length": 1928, - "max_seq_length": 2002, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=MaziyarPanahi/Mistral-7B-Instruct-Aya-101,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1881.2492537313433, - "min_seq_length": 1860, - "max_seq_length": 1976, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=MaziyarPanahi/Mistral-7B-Instruct-Aya-101,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - 
], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "0e4d6ae" + "git_hash": "0e4d6ae" } \ No newline at end of file diff --git a/MaziyarPanahi/Mistral-7B-Instruct-Aya-101/results_2024-04-17T09-07-30.140283.json b/MaziyarPanahi/Mistral-7B-Instruct-Aya-101/results_2024-04-17T09-07-30.140283.json index c0029ccce2de123d367c35adfaf34e485010f90b..79a12d6c336fda39e9576e6f64cf5519a2bd85bd 100644 --- a/MaziyarPanahi/Mistral-7B-Instruct-Aya-101/results_2024-04-17T09-07-30.140283.json +++ b/MaziyarPanahi/Mistral-7B-Instruct-Aya-101/results_2024-04-17T09-07-30.140283.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6462729736246184, - "all_grouped_npm": 0.4704317569510121, + "all_grouped_average": 0.6649049555785823, + "all_grouped_npm": 0.49815792057298236, "all_grouped": { "enem_challenge": 0.6060181945416375, "bluex": 0.5438108484005564, @@ -45,7 +45,7 @@ "faquad_nli": 0.6218181818181818, "hatebr_offensive": 0.8004209608305171, "portuguese_hate_speech": 0.6762940852684385, - "tweetsentbr": 0.5030635127570277 + "tweetsentbr": 0.6707513503427037 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6060181945416375, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.6218181818181818, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8004209608305171, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6762940852684385, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5030635127570277 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6707513503427037 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6060181945416375, @@ -150,9 +150,9 @@ "main_score": 0.6762940852684385 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5030635127570277, + "f1_macro,all": 0.6707513503427037, "acc,all": 0.7034825870646766, - "main_score": 0.5030635127570277 + "main_score": 0.6707513503427037 } }, "config_tasks": { diff --git a/MaziyarPanahi/Mistral-7B-Instruct-v0.3/raw_2024-05-26T08-26-53.282710/results.json b/MaziyarPanahi/Mistral-7B-Instruct-v0.3/raw_2024-05-26T08-26-53.282710/results.json index 26b33eb1dc43bb3a79af85a4fbd151f9be86ae54..b738730cbbc2b7a6165beb07e726d4cdf99503a6 100644 --- a/MaziyarPanahi/Mistral-7B-Instruct-v0.3/raw_2024-05-26T08-26-53.282710/results.json +++ b/MaziyarPanahi/Mistral-7B-Instruct-v0.3/raw_2024-05-26T08-26-53.282710/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9027027027027027, - "acc,all": 0.9027777777777778, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7942968956652824, - "mse,all": 0.4601633986928105, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5312934631432545, - "acc,exam_id__USP_2023": 0.6363636363636364, - "acc,exam_id__USP_2022": 0.42857142857142855, - "acc,exam_id__UNICAMP_2019": 0.58, - "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373, - "acc,exam_id__UNICAMP_2018": 0.4074074074074074, - "acc,exam_id__UNICAMP_2021_1": 0.5, - "acc,exam_id__USP_2019": 0.4, - "acc,exam_id__USP_2024": 0.7317073170731707, - "acc,exam_id__UNICAMP_2020": 0.5272727272727272, - "acc,exam_id__USP_2020": 0.5, - "acc,exam_id__UNICAMP_2022": 0.6410256410256411, - "acc,exam_id__UNICAMP_2023": 0.627906976744186, - "acc,exam_id__USP_2021": 0.5192307692307693, - "acc,exam_id__USP_2018": 0.4444444444444444, - "acc,exam_id__UNICAMP_2024": 0.5555555555555556, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6242127361791463, - "acc,exam_id__2015": 0.5546218487394958, - 
"acc,exam_id__2011": 0.7094017094017094, - "acc,exam_id__2012": 0.6293103448275862, - "acc,exam_id__2010": 0.5982905982905983, - "acc,exam_id__2016": 0.6694214876033058, - "acc,exam_id__2016_2": 0.6016260162601627, - "acc,exam_id__2017": 0.5258620689655172, - "acc,exam_id__2022": 0.6466165413533834, - "acc,exam_id__2023": 0.6444444444444445, - "acc,exam_id__2009": 0.591304347826087, - "acc,exam_id__2013": 0.6759259259259259, - "acc,exam_id__2014": 0.6422018348623854 - }, - "faquad_nli": { - "f1_macro,all": 0.6866859299725274, - "acc,all": 0.7184615384615385, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8343452944711321, - "acc,all": 0.8364285714285714 - }, - "oab_exams": { - "acc,all": 0.4359908883826879, - "acc,exam_id__2014-14": 0.525, - "acc,exam_id__2012-07": 0.4, - "acc,exam_id__2012-06": 0.4, - "acc,exam_id__2016-21": 0.35, - "acc,exam_id__2011-04": 0.35, - "acc,exam_id__2013-10": 0.375, - "acc,exam_id__2015-16": 0.4375, - "acc,exam_id__2014-13": 0.425, - "acc,exam_id__2011-03": 0.3333333333333333, - "acc,exam_id__2016-20a": 0.375, - "acc,exam_id__2017-23": 0.425, - "acc,exam_id__2012-06a": 0.4875, - "acc,exam_id__2012-09": 0.4155844155844156, - "acc,exam_id__2012-08": 0.45, - "acc,exam_id__2015-17": 0.5128205128205128, - "acc,exam_id__2010-01": 0.3411764705882353, - "acc,exam_id__2016-20": 0.45, - "acc,exam_id__2017-24": 0.3875, - "acc,exam_id__2018-25": 0.425, - "acc,exam_id__2010-02": 0.43, - "acc,exam_id__2015-18": 0.425, - "acc,exam_id__2017-22": 0.575, - "acc,exam_id__2011-05": 0.4625, - "acc,exam_id__2016-19": 0.5, - "acc,exam_id__2013-12": 0.55, - "acc,exam_id__2013-11": 0.45, - "acc,exam_id__2014-15": 0.5512820512820513, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6824631701748856, - "acc,all": 0.7121034077555817 - }, - "tweetsentbr": { - "f1_macro,all": 0.4747604946147119, - "acc,all": 0.6860696517412935, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9027027027027027, + "acc,all": 0.9027777777777778, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7942968956652824, + "mse,all": 0.4601633986928105, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5312934631432545, + "acc,exam_id__USP_2023": 0.6363636363636364, + "acc,exam_id__USP_2022": 0.42857142857142855, + "acc,exam_id__UNICAMP_2019": 0.58, + "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373, + "acc,exam_id__UNICAMP_2018": 0.4074074074074074, + "acc,exam_id__UNICAMP_2021_1": 0.5, + "acc,exam_id__USP_2019": 0.4, + "acc,exam_id__USP_2024": 0.7317073170731707, + "acc,exam_id__UNICAMP_2020": 0.5272727272727272, + "acc,exam_id__USP_2020": 0.5, + "acc,exam_id__UNICAMP_2022": 0.6410256410256411, + "acc,exam_id__UNICAMP_2023": 0.627906976744186, + "acc,exam_id__USP_2021": 0.5192307692307693, + "acc,exam_id__USP_2018": 0.4444444444444444, + "acc,exam_id__UNICAMP_2024": 0.5555555555555556, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6242127361791463, + "acc,exam_id__2015": 0.5546218487394958, + "acc,exam_id__2011": 0.7094017094017094, + "acc,exam_id__2012": 0.6293103448275862, + "acc,exam_id__2010": 0.5982905982905983, + "acc,exam_id__2016": 0.6694214876033058, + "acc,exam_id__2016_2": 0.6016260162601627, + "acc,exam_id__2017": 0.5258620689655172, + "acc,exam_id__2022": 0.6466165413533834, + "acc,exam_id__2023": 0.6444444444444445, + "acc,exam_id__2009": 0.591304347826087, + "acc,exam_id__2013": 0.6759259259259259, + "acc,exam_id__2014": 0.6422018348623854 + }, + "faquad_nli": { + "f1_macro,all": 0.6866859299725274, + "acc,all": 0.7184615384615385, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8343452944711321, + "acc,all": 0.8364285714285714 + }, + "oab_exams": { + "acc,all": 0.4359908883826879, + "acc,exam_id__2014-14": 0.525, + "acc,exam_id__2012-07": 0.4, + "acc,exam_id__2012-06": 0.4, + "acc,exam_id__2016-21": 0.35, + "acc,exam_id__2011-04": 0.35, + "acc,exam_id__2013-10": 0.375, + "acc,exam_id__2015-16": 0.4375, + "acc,exam_id__2014-13": 0.425, + "acc,exam_id__2011-03": 0.3333333333333333, + "acc,exam_id__2016-20a": 0.375, + "acc,exam_id__2017-23": 0.425, + "acc,exam_id__2012-06a": 0.4875, + "acc,exam_id__2012-09": 0.4155844155844156, + "acc,exam_id__2012-08": 0.45, + "acc,exam_id__2015-17": 0.5128205128205128, + "acc,exam_id__2010-01": 0.3411764705882353, + "acc,exam_id__2016-20": 0.45, + "acc,exam_id__2017-24": 0.3875, + "acc,exam_id__2018-25": 0.425, + "acc,exam_id__2010-02": 0.43, + "acc,exam_id__2015-18": 0.425, + "acc,exam_id__2017-22": 0.575, + "acc,exam_id__2011-05": 
0.4625, + "acc,exam_id__2016-19": 0.5, + "acc,exam_id__2013-12": 0.55, + "acc,exam_id__2013-11": 0.45, + "acc,exam_id__2014-15": 0.5512820512820513, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6824631701748856, + "acc,all": 0.7121034077555817 + }, + "tweetsentbr": { + "f1_macro,all": 0.6330139928196159, + "acc,all": 0.6860696517412935, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 2, - "non_truncated": 14148, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 2, - "has_chat_template": true, - "chat_type": "user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "39bf6ae128500f10d432414c72f50223467b24cc", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 15032926208, - "model_num_parameters": 7248023552, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1403.7455065359477, - "min_seq_length": 1380, - "max_seq_length": 1470, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1642.7455065359477, - "min_seq_length": 1619, - "max_seq_length": 1709, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 1, - "non_truncated": 718, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 1, - "mean_seq_length": 1732.9262865090404, - "min_seq_length": 1356, - "max_seq_length": 2533, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998609179415855 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - 
"mean_seq_length": 1633.039188243527, - "min_seq_length": 1367, - "max_seq_length": 2631, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1643.9876923076922, - "min_seq_length": 1588, - "max_seq_length": 1764, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 2, + "non_truncated": 14148, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 2, + "has_chat_template": true, + "chat_type": "user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "39bf6ae128500f10d432414c72f50223467b24cc", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 15032926208, + "model_num_parameters": 7248023552, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1384.3878571428572, - "min_seq_length": 1361, - "max_seq_length": 1635, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1378.764464692483, - "min_seq_length": 1112, - "max_seq_length": 1881, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1403.7455065359477, + "min_seq_length": 1380, + "max_seq_length": 1470, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1642.7455065359477, + "min_seq_length": 1619, + "max_seq_length": 1709, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 1, + "non_truncated": 718, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 1, + "mean_seq_length": 1732.9262865090404, + "min_seq_length": 1356, + "max_seq_length": 2533, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998609179415855 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1633.039188243527, + "min_seq_length": 1367, + "max_seq_length": 2631, 
+ "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1643.9876923076922, + "min_seq_length": 1588, + "max_seq_length": 1764, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1384.3878571428572, + "min_seq_length": 1361, + "max_seq_length": 1635, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1378.764464692483, + "min_seq_length": 1112, + "max_seq_length": 1881, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1885.3360752056403, + "min_seq_length": 1850, + "max_seq_length": 1924, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1631.2492537313433, + "min_seq_length": 1610, + "max_seq_length": 1726, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1885.3360752056403, - "min_seq_length": 1850, - "max_seq_length": 1924, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=MaziyarPanahi/Mistral-7B-Instruct-v0.3,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1631.2492537313433, - "min_seq_length": 1610, - "max_seq_length": 1726, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=MaziyarPanahi/Mistral-7B-Instruct-v0.3,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], 
- "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/MaziyarPanahi/Mistral-7B-Instruct-v0.3/results_2024-05-26T08-26-53.282710.json b/MaziyarPanahi/Mistral-7B-Instruct-v0.3/results_2024-05-26T08-26-53.282710.json index c47264a1884c5e47786c31e380fb9890aefb33f4..84cd685243d163c1eccd1b64ff2a61c3c1c633bc 100644 --- a/MaziyarPanahi/Mistral-7B-Instruct-v0.3/results_2024-05-26T08-26-53.282710.json +++ b/MaziyarPanahi/Mistral-7B-Instruct-v0.3/results_2024-05-26T08-26-53.282710.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.662972397256259, - "all_grouped_npm": 0.4972041070748634, + "all_grouped_average": 0.6805561192790262, + "all_grouped_npm": 0.5233703600849337, "all_grouped": { "enem_challenge": 0.6242127361791463, "bluex": 0.5312934631432545, @@ -45,7 +45,7 @@ "faquad_nli": 0.6866859299725274, "hatebr_offensive": 0.8343452944711321, "portuguese_hate_speech": 0.6824631701748856, - "tweetsentbr": 0.4747604946147119 + "tweetsentbr": 0.6330139928196159 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6242127361791463, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.6866859299725274, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8343452944711321, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6824631701748856, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4747604946147119 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6330139928196159 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6242127361791463, @@ -150,9 +150,9 @@ "main_score": 0.6824631701748856 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4747604946147119, + "f1_macro,all": 0.6330139928196159, "acc,all": 0.6860696517412935, - "main_score": 0.4747604946147119 + "main_score": 0.6330139928196159 } }, "config_tasks": { diff --git a/MaziyarPanahi/Topxtral-4x7B-v0.1/raw_2024-06-13T01-59-37.042342/results.json b/MaziyarPanahi/Topxtral-4x7B-v0.1/raw_2024-06-13T01-59-37.042342/results.json index 2505654557aa8c1aa364c1def00fc7b81c28dc19..44efca6fe3966f1464b463ae526a7c596d862149 100644 --- a/MaziyarPanahi/Topxtral-4x7B-v0.1/raw_2024-06-13T01-59-37.042342/results.json +++ b/MaziyarPanahi/Topxtral-4x7B-v0.1/raw_2024-06-13T01-59-37.042342/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9207507876328296, - "acc,all": 0.920751633986928, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7759288997534963, - "mse,all": 0.434399676879085, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5479833101529903, - "acc,exam_id__UNICAMP_2023": 0.6511627906976745, - "acc,exam_id__UNICAMP_2022": 0.6153846153846154, - "acc,exam_id__USP_2018": 0.4444444444444444, - "acc,exam_id__USP_2023": 0.5909090909090909, - "acc,exam_id__USP_2024": 0.7073170731707317, - "acc,exam_id__UNICAMP_2018": 0.5555555555555556, - "acc,exam_id__USP_2019": 0.425, - "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373, - "acc,exam_id__UNICAMP_2024": 0.5333333333333333, - "acc,exam_id__UNICAMP_2019": 0.62, - "acc,exam_id__UNICAMP_2021_1": 0.5, - "acc,exam_id__USP_2021": 0.5, - "acc,exam_id__USP_2022": 0.46938775510204084, - "acc,exam_id__UNICAMP_2020": 0.5818181818181818, - "acc,exam_id__USP_2020": 0.5178571428571429, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6431070678796361, - "acc,exam_id__2022": 0.6165413533834586, - "acc,exam_id__2016_2": 0.6178861788617886, - 
"acc,exam_id__2012": 0.6293103448275862, - "acc,exam_id__2023": 0.6444444444444445, - "acc,exam_id__2015": 0.6218487394957983, - "acc,exam_id__2017": 0.6637931034482759, - "acc,exam_id__2014": 0.6238532110091743, - "acc,exam_id__2016": 0.6115702479338843, - "acc,exam_id__2013": 0.6944444444444444, - "acc,exam_id__2010": 0.7094017094017094, - "acc,exam_id__2011": 0.6581196581196581, - "acc,exam_id__2009": 0.6347826086956522 - }, - "faquad_nli": { - "f1_macro,all": 0.7802301835247021, - "acc,all": 0.8461538461538461, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8072877590949881, - "acc,all": 0.8128571428571428 - }, - "oab_exams": { - "acc,all": 0.4145785876993166, - "acc,exam_id__2012-06a": 0.3875, - "acc,exam_id__2016-19": 0.44871794871794873, - "acc,exam_id__2014-14": 0.525, - "acc,exam_id__2014-15": 0.5, - "acc,exam_id__2011-04": 0.4125, - "acc,exam_id__2018-25": 0.4875, - "acc,exam_id__2014-13": 0.3, - "acc,exam_id__2010-02": 0.41, - "acc,exam_id__2012-07": 0.3625, - "acc,exam_id__2013-11": 0.4625, - "acc,exam_id__2015-17": 0.5, - "acc,exam_id__2011-05": 0.475, - "acc,exam_id__2011-03": 0.31313131313131315, - "acc,exam_id__2016-21": 0.375, - "acc,exam_id__2017-22": 0.5625, - "acc,exam_id__2016-20a": 0.3625, - "acc,exam_id__2012-09": 0.37662337662337664, - "acc,exam_id__2016-20": 0.3875, - "acc,exam_id__2015-16": 0.3375, - "acc,exam_id__2017-23": 0.4375, - "acc,exam_id__2013-10": 0.375, - "acc,exam_id__2013-12": 0.4625, - "acc,exam_id__2010-01": 0.36470588235294116, - "acc,exam_id__2015-18": 0.3875, - "acc,exam_id__2017-24": 0.3375, - "acc,exam_id__2012-08": 0.4125, - "acc,exam_id__2012-06": 0.4625, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.697753184918286, - "acc,all": 0.7567567567567568 - }, - "tweetsentbr": { - "f1_macro,all": 0.4853495476290667, - "acc,all": 0.7009950248756219, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9207507876328296, + "acc,all": 0.920751633986928, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7759288997534963, + "mse,all": 0.434399676879085, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5479833101529903, + "acc,exam_id__UNICAMP_2023": 0.6511627906976745, + "acc,exam_id__UNICAMP_2022": 0.6153846153846154, + "acc,exam_id__USP_2018": 0.4444444444444444, + "acc,exam_id__USP_2023": 0.5909090909090909, + "acc,exam_id__USP_2024": 0.7073170731707317, + "acc,exam_id__UNICAMP_2018": 0.5555555555555556, + "acc,exam_id__USP_2019": 0.425, + "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373, + "acc,exam_id__UNICAMP_2024": 0.5333333333333333, + "acc,exam_id__UNICAMP_2019": 0.62, + "acc,exam_id__UNICAMP_2021_1": 0.5, + "acc,exam_id__USP_2021": 0.5, + "acc,exam_id__USP_2022": 0.46938775510204084, + "acc,exam_id__UNICAMP_2020": 0.5818181818181818, + "acc,exam_id__USP_2020": 0.5178571428571429, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6431070678796361, + "acc,exam_id__2022": 0.6165413533834586, + "acc,exam_id__2016_2": 0.6178861788617886, + "acc,exam_id__2012": 0.6293103448275862, + "acc,exam_id__2023": 0.6444444444444445, + "acc,exam_id__2015": 0.6218487394957983, + "acc,exam_id__2017": 0.6637931034482759, + "acc,exam_id__2014": 0.6238532110091743, + "acc,exam_id__2016": 0.6115702479338843, + "acc,exam_id__2013": 0.6944444444444444, + "acc,exam_id__2010": 0.7094017094017094, + "acc,exam_id__2011": 0.6581196581196581, + "acc,exam_id__2009": 0.6347826086956522 + }, + "faquad_nli": { + "f1_macro,all": 0.7802301835247021, + "acc,all": 0.8461538461538461, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8072877590949881, + "acc,all": 0.8128571428571428 + }, + "oab_exams": { + "acc,all": 0.4145785876993166, + "acc,exam_id__2012-06a": 0.3875, + "acc,exam_id__2016-19": 0.44871794871794873, + "acc,exam_id__2014-14": 0.525, + "acc,exam_id__2014-15": 0.5, + "acc,exam_id__2011-04": 0.4125, + "acc,exam_id__2018-25": 0.4875, + "acc,exam_id__2014-13": 0.3, + "acc,exam_id__2010-02": 0.41, + "acc,exam_id__2012-07": 0.3625, + "acc,exam_id__2013-11": 0.4625, + "acc,exam_id__2015-17": 0.5, + "acc,exam_id__2011-05": 0.475, + "acc,exam_id__2011-03": 0.31313131313131315, + "acc,exam_id__2016-21": 0.375, + "acc,exam_id__2017-22": 0.5625, + "acc,exam_id__2016-20a": 0.3625, + "acc,exam_id__2012-09": 0.37662337662337664, + "acc,exam_id__2016-20": 0.3875, + "acc,exam_id__2015-16": 0.3375, + "acc,exam_id__2017-23": 0.4375, + "acc,exam_id__2013-10": 0.375, + "acc,exam_id__2013-12": 0.4625, + "acc,exam_id__2010-01": 
0.36470588235294116, + "acc,exam_id__2015-18": 0.3875, + "acc,exam_id__2017-24": 0.3375, + "acc,exam_id__2012-08": 0.4125, + "acc,exam_id__2012-06": 0.4625, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.697753184918286, + "acc,all": 0.7567567567567568 + }, + "tweetsentbr": { + "f1_macro,all": 0.6471327301720889, + "acc,all": 0.7009950248756219, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "1a219935a01db03820ddabb2e29c199222a772e5", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 37569708032, - "model_num_parameters": 18516414464, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 
1620.039188243527, - "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "1a219935a01db03820ddabb2e29c199222a772e5", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 37569708032, + "model_num_parameters": 18516414464, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=MaziyarPanahi/Topxtral-4x7B-v0.1,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=MaziyarPanahi/Topxtral-4x7B-v0.1,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": 
null - }, - "git_hash": "f2a0116" + "git_hash": "f2a0116" } \ No newline at end of file diff --git a/MaziyarPanahi/Topxtral-4x7B-v0.1/results_2024-06-13T01-59-37.042342.json b/MaziyarPanahi/Topxtral-4x7B-v0.1/results_2024-06-13T01-59-37.042342.json index 643cc9477426ae0431d70de55e12710a53678c54..4a37ffe1a24cd818e1b10db74cd3379faef61603 100644 --- a/MaziyarPanahi/Topxtral-4x7B-v0.1/results_2024-06-13T01-59-37.042342.json +++ b/MaziyarPanahi/Topxtral-4x7B-v0.1/results_2024-06-13T01-59-37.042342.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6747743698094791, - "all_grouped_npm": 0.5191238709611985, + "all_grouped_average": 0.6927502789809261, + "all_grouped_npm": 0.5458737357996613, "all_grouped": { "enem_challenge": 0.6431070678796361, "bluex": 0.5479833101529903, @@ -45,7 +45,7 @@ "faquad_nli": 0.7802301835247021, "hatebr_offensive": 0.8072877590949881, "portuguese_hate_speech": 0.697753184918286, - "tweetsentbr": 0.4853495476290667 + "tweetsentbr": 0.6471327301720889 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6431070678796361, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7802301835247021, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8072877590949881, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.697753184918286, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4853495476290667 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6471327301720889 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6431070678796361, @@ -150,9 +150,9 @@ "main_score": 0.697753184918286 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4853495476290667, + "f1_macro,all": 0.6471327301720889, "acc,all": 0.7009950248756219, - "main_score": 0.4853495476290667 + "main_score": 0.6471327301720889 } }, "config_tasks": { diff --git a/MulaBR/Mula-4x160-v0.1/raw_2024-04-22T00-05-24.255163/results.json b/MulaBR/Mula-4x160-v0.1/raw_2024-04-22T00-05-24.255163/results.json index 357c6f9f7a9bc82819d6121876178adc45e9458c..8e976ffccb0b4e26be5d580a88c49dd178260cbf 100644 --- a/MulaBR/Mula-4x160-v0.1/raw_2024-04-22T00-05-24.255163/results.json +++ b/MulaBR/Mula-4x160-v0.1/raw_2024-04-22T00-05-24.255163/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.335683441456502, - "acc,all": 0.5004084967320261, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.11349165436666529, - "mse,all": 1.0803349673202614, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.2517385257301808, - "acc,exam_id__UNICAMP_2021_2": 0.37254901960784315, - "acc,exam_id__USP_2019": 0.25, - "acc,exam_id__UNICAMP_2018": 0.3148148148148148, - "acc,exam_id__USP_2021": 0.17307692307692307, - "acc,exam_id__UNICAMP_2022": 0.20512820512820512, - "acc,exam_id__UNICAMP_2024": 0.26666666666666666, - "acc,exam_id__USP_2024": 0.2682926829268293, - "acc,exam_id__USP_2022": 0.14285714285714285, - "acc,exam_id__UNICAMP_2020": 0.2909090909090909, - "acc,exam_id__USP_2018": 0.2037037037037037, - "acc,exam_id__UNICAMP_2023": 0.3023255813953488, - "acc,exam_id__UNICAMP_2019": 0.24, - "acc,exam_id__USP_2020": 0.26785714285714285, - "acc,exam_id__UNICAMP_2021_1": 0.2391304347826087, - "acc,exam_id__USP_2023": 0.22727272727272727, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.21343596920923724, - "acc,exam_id__2013": 0.2222222222222222, - "acc,exam_id__2014": 0.1743119266055046, - "acc,exam_id__2012": 0.15517241379310345, - "acc,exam_id__2011": 
0.21367521367521367, - "acc,exam_id__2017": 0.25862068965517243, - "acc,exam_id__2009": 0.1826086956521739, - "acc,exam_id__2016": 0.24793388429752067, - "acc,exam_id__2023": 0.18518518518518517, - "acc,exam_id__2015": 0.2773109243697479, - "acc,exam_id__2010": 0.20512820512820512, - "acc,exam_id__2016_2": 0.24390243902439024, - "acc,exam_id__2022": 0.19548872180451127 - }, - "faquad_nli": { - "f1_macro,all": 0.4396551724137931, - "acc,all": 0.7846153846153846, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.41502718891863716, - "acc,all": 0.49357142857142855 - }, - "oab_exams": { - "acc,all": 0.2505694760820046, - "acc,exam_id__2012-08": 0.3375, - "acc,exam_id__2012-07": 0.2625, - "acc,exam_id__2015-18": 0.175, - "acc,exam_id__2010-02": 0.21, - "acc,exam_id__2016-20a": 0.2, - "acc,exam_id__2014-15": 0.32051282051282054, - "acc,exam_id__2012-06a": 0.25, - "acc,exam_id__2011-03": 0.24242424242424243, - "acc,exam_id__2014-13": 0.2125, - "acc,exam_id__2013-11": 0.3375, - "acc,exam_id__2017-24": 0.2375, - "acc,exam_id__2011-05": 0.225, - "acc,exam_id__2016-20": 0.225, - "acc,exam_id__2010-01": 0.23529411764705882, - "acc,exam_id__2017-23": 0.225, - "acc,exam_id__2018-25": 0.325, - "acc,exam_id__2014-14": 0.275, - "acc,exam_id__2012-06": 0.2125, - "acc,exam_id__2013-12": 0.2875, - "acc,exam_id__2015-16": 0.15, - "acc,exam_id__2011-04": 0.2625, - "acc,exam_id__2016-21": 0.225, - "acc,exam_id__2012-09": 0.3116883116883117, - "acc,exam_id__2017-22": 0.2625, - "acc,exam_id__2013-10": 0.2875, - "acc,exam_id__2015-17": 0.2692307692307692, - "acc,exam_id__2016-19": 0.21794871794871795, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.22986425339366515, - "acc,all": 0.2984723854289072 - }, - "tweetsentbr": { - "f1_macro,all": 0.11244668476153548, - "acc,all": 0.2885572139303483, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.335683441456502, + "acc,all": 0.5004084967320261, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.11349165436666529, + "mse,all": 1.0803349673202614, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.2517385257301808, + "acc,exam_id__UNICAMP_2021_2": 0.37254901960784315, + "acc,exam_id__USP_2019": 0.25, + "acc,exam_id__UNICAMP_2018": 0.3148148148148148, + "acc,exam_id__USP_2021": 0.17307692307692307, + "acc,exam_id__UNICAMP_2022": 0.20512820512820512, + "acc,exam_id__UNICAMP_2024": 0.26666666666666666, + "acc,exam_id__USP_2024": 0.2682926829268293, + "acc,exam_id__USP_2022": 0.14285714285714285, + "acc,exam_id__UNICAMP_2020": 0.2909090909090909, + "acc,exam_id__USP_2018": 0.2037037037037037, + "acc,exam_id__UNICAMP_2023": 0.3023255813953488, + "acc,exam_id__UNICAMP_2019": 0.24, + "acc,exam_id__USP_2020": 0.26785714285714285, + "acc,exam_id__UNICAMP_2021_1": 0.2391304347826087, + "acc,exam_id__USP_2023": 0.22727272727272727, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.21343596920923724, + "acc,exam_id__2013": 0.2222222222222222, + "acc,exam_id__2014": 0.1743119266055046, + "acc,exam_id__2012": 0.15517241379310345, + "acc,exam_id__2011": 0.21367521367521367, + "acc,exam_id__2017": 0.25862068965517243, + "acc,exam_id__2009": 0.1826086956521739, + "acc,exam_id__2016": 0.24793388429752067, + "acc,exam_id__2023": 0.18518518518518517, + "acc,exam_id__2015": 0.2773109243697479, + "acc,exam_id__2010": 0.20512820512820512, + "acc,exam_id__2016_2": 0.24390243902439024, + "acc,exam_id__2022": 0.19548872180451127 + }, + "faquad_nli": { + "f1_macro,all": 0.4396551724137931, + "acc,all": 0.7846153846153846, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.41502718891863716, + "acc,all": 0.49357142857142855 + }, + "oab_exams": { + "acc,all": 0.2505694760820046, + "acc,exam_id__2012-08": 0.3375, + "acc,exam_id__2012-07": 0.2625, + "acc,exam_id__2015-18": 0.175, + "acc,exam_id__2010-02": 0.21, + "acc,exam_id__2016-20a": 0.2, + "acc,exam_id__2014-15": 0.32051282051282054, + "acc,exam_id__2012-06a": 0.25, + "acc,exam_id__2011-03": 0.24242424242424243, + "acc,exam_id__2014-13": 0.2125, + "acc,exam_id__2013-11": 0.3375, + "acc,exam_id__2017-24": 0.2375, + "acc,exam_id__2011-05": 0.225, + "acc,exam_id__2016-20": 0.225, + "acc,exam_id__2010-01": 0.23529411764705882, + "acc,exam_id__2017-23": 0.225, + "acc,exam_id__2018-25": 0.325, + "acc,exam_id__2014-14": 0.275, + "acc,exam_id__2012-06": 0.2125, + "acc,exam_id__2013-12": 0.2875, + "acc,exam_id__2015-16": 0.15, + "acc,exam_id__2011-04": 0.2625, + 
"acc,exam_id__2016-21": 0.225, + "acc,exam_id__2012-09": 0.3116883116883117, + "acc,exam_id__2017-22": 0.2625, + "acc,exam_id__2013-10": 0.2875, + "acc,exam_id__2015-17": 0.2692307692307692, + "acc,exam_id__2016-19": 0.21794871794871795, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.22986425339366515, + "acc,all": 0.2984723854289072 + }, + "tweetsentbr": { + "f1_macro,all": 0.14992891301538064, + "acc,all": 0.2885572139303483, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 2, - "non_truncated": 14148, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "01f3a014bf615c9f1b60598dc7d132c90b7d5bdc", - "model_dtype": "torch.float16", - "model_memory_footprint": 840809472, - "model_num_parameters": 417258240, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 2048, - "max_ctx_length": 2016, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 925.4232026143791, - "min_seq_length": 910, - "max_seq_length": 964, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 966.4232026143791, - "min_seq_length": 951, - "max_seq_length": 1005, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1171.817802503477, - "min_seq_length": 905, - "max_seq_length": 1802, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 4, - "mean_seq_length": 1008.4177746675997, - 
"min_seq_length": 830, - "max_seq_length": 2485, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972008397480754 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 969.1338461538462, - "min_seq_length": 937, - "max_seq_length": 1035, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 2, + "non_truncated": 14148, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "01f3a014bf615c9f1b60598dc7d132c90b7d5bdc", + "model_dtype": "torch.float16", + "model_memory_footprint": 840809472, + "model_num_parameters": 417258240, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + "max_length": 2048, + "max_ctx_length": 2016, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 868.4407142857143, - "min_seq_length": 853, - "max_seq_length": 1062, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 833.024145785877, - "min_seq_length": 660, - "max_seq_length": 1109, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 925.4232026143791, + "min_seq_length": 910, + "max_seq_length": 964, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 966.4232026143791, + "min_seq_length": 951, + "max_seq_length": 1005, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1171.817802503477, + "min_seq_length": 905, + "max_seq_length": 1802, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 4, + "mean_seq_length": 1008.4177746675997, + "min_seq_length": 830, + "max_seq_length": 2485, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 
3.0, + "mean_effective_fewshot_size": 2.9972008397480754 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 969.1338461538462, + "min_seq_length": 937, + "max_seq_length": 1035, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 868.4407142857143, + "min_seq_length": 853, + "max_seq_length": 1062, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 833.024145785877, + "min_seq_length": 660, + "max_seq_length": 1109, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1220.021151586369, + "min_seq_length": 1193, + "max_seq_length": 1256, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1155.4194029850746, + "min_seq_length": 1138, + "max_seq_length": 1212, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1220.021151586369, - "min_seq_length": 1193, - "max_seq_length": 1256, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=MulaBR/Mula-4x160-v0.1,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1155.4194029850746, - "min_seq_length": 1138, - "max_seq_length": 1212, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=MulaBR/Mula-4x160-v0.1,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "0e4d6ae" + "git_hash": "0e4d6ae" } \ No newline at end of 
file diff --git a/MulaBR/Mula-4x160-v0.1/results_2024-04-22T00-05-24.255163.json b/MulaBR/Mula-4x160-v0.1/results_2024-04-22T00-05-24.255163.json index 4beda4aadd45353298eb5264d976a3879b680490..0bc14d48f1a8d9a13015011baae1a96e0bcea5d9 100644 --- a/MulaBR/Mula-4x160-v0.1/results_2024-04-22T00-05-24.255163.json +++ b/MulaBR/Mula-4x160-v0.1/results_2024-04-22T00-05-24.255163.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.26243470737024677, - "all_grouped_npm": -0.12911429030679075, + "all_grouped_average": 0.2665993993984518, + "all_grouped_npm": -0.12291683193148567, "all_grouped": { "enem_challenge": 0.21343596920923724, "bluex": 0.2517385257301808, @@ -45,7 +45,7 @@ "faquad_nli": 0.4396551724137931, "hatebr_offensive": 0.41502718891863716, "portuguese_hate_speech": 0.22986425339366515, - "tweetsentbr": 0.11244668476153548 + "tweetsentbr": 0.14992891301538064 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.21343596920923724, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.4396551724137931, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.41502718891863716, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.22986425339366515, - "harness|tweetsentbr|tweetsentbr|None|25": 0.11244668476153548 + "harness|tweetsentbr|tweetsentbr|None|25": 0.14992891301538064 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.21343596920923724, @@ -150,9 +150,9 @@ "main_score": 0.22986425339366515 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.11244668476153548, + "f1_macro,all": 0.14992891301538064, "acc,all": 0.2885572139303483, - "main_score": 0.11244668476153548 + "main_score": 0.14992891301538064 } }, "config_tasks": { diff --git a/MulaBR/Mula-8x160-v0.1/raw_2024-05-08T21-44-31.351826/results.json b/MulaBR/Mula-8x160-v0.1/raw_2024-05-08T21-44-31.351826/results.json index fe5736ae9e9d1cbb9ed264097d1d263e826462e8..90ab0ce37775c8c9ccd3a78dd2ef3427f0bd40ad 100644 --- a/MulaBR/Mula-8x160-v0.1/raw_2024-05-08T21-44-31.351826/results.json +++ b/MulaBR/Mula-8x160-v0.1/raw_2024-05-08T21-44-31.351826/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.22379028616587124, - "acc,all": 0.5004084967320261, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.04732817202670604, - "mse,all": 2.2953063725490197, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.21279554937413073, - "acc,exam_id__UNICAMP_2023": 0.2558139534883721, - "acc,exam_id__UNICAMP_2024": 0.17777777777777778, - "acc,exam_id__UNICAMP_2019": 0.2, - "acc,exam_id__USP_2019": 0.25, - "acc,exam_id__UNICAMP_2020": 0.21818181818181817, - "acc,exam_id__USP_2022": 0.22448979591836735, - "acc,exam_id__UNICAMP_2021_1": 0.32608695652173914, - "acc,exam_id__UNICAMP_2021_2": 0.23529411764705882, - "acc,exam_id__USP_2024": 0.21951219512195122, - "acc,exam_id__UNICAMP_2022": 0.28205128205128205, - "acc,exam_id__USP_2020": 0.14285714285714285, - "acc,exam_id__USP_2018": 0.16666666666666666, - "acc,exam_id__USP_2021": 0.15384615384615385, - "acc,exam_id__USP_2023": 0.13636363636363635, - "acc,exam_id__UNICAMP_2018": 0.24074074074074073, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.20503848845346395, - "acc,exam_id__2023": 0.17037037037037037, - "acc,exam_id__2009": 0.22608695652173913, - "acc,exam_id__2014": 0.24770642201834864, - "acc,exam_id__2022": 0.19548872180451127, - "acc,exam_id__2015": 0.25210084033613445, - "acc,exam_id__2017": 0.2413793103448276, 
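For reference, the multiple-choice configs that recur throughout these results files (bluex, enem_challenge, oab_exams) all declare the same post-processing chain: normalize_spaces, remove_accents, find_choices over an ordered regex cascade, then take_first (oab_exams restricts the choices to A-D). Below is a minimal Python sketch of what that configured chain appears to do; the regex patterns are copied verbatim from the config, while the function names and overall structure are illustrative assumptions, not the harness's actual implementation.

import re
import unicodedata

# Regex cascade copied from the "find_choices" filter config (A-E variant).
CHOICE_PATTERNS = [
    r"(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\b",
    r"\b([ABCDE])\.",
    r"\b([ABCDE]) ?[.):-]",
    r"\b([ABCDE])$",
    r"\b([ABCDE])\b",
]

def normalize_spaces(text: str) -> str:
    # Collapse runs of whitespace into single spaces (mirrors "normalize_spaces").
    return re.sub(r"\s+", " ", text).strip()

def remove_accents(text: str) -> str:
    # Strip diacritics so e.g. "opção" can match the accent-free "[Oo]pcao" pattern
    # (mirrors "remove_accents").
    return "".join(c for c in unicodedata.normalize("NFKD", text)
                   if not unicodedata.combining(c))

def extract_choice(generation: str):
    # Hypothetical helper: try each pattern in order and keep the first capture,
    # approximating "find_choices" followed by "take_first".
    text = remove_accents(normalize_spaces(generation))
    for pattern in CHOICE_PATTERNS:
        match = re.search(pattern, text)
        if match:
            return match.group(1)
    return None

# Example: a verbose completion still resolves to a single answer letter.
print(extract_choice("A alternativa correta é a letra C, pois..."))  # -> "C"

The pattern order acts as a precision-first cascade: explicit "Letra/Alternativa/Resposta/Opcao" cues are preferred, and only if none match does the filter fall back to a bare capital letter anywhere in the output.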
- "acc,exam_id__2016": 0.19008264462809918, - "acc,exam_id__2011": 0.20512820512820512, - "acc,exam_id__2013": 0.17592592592592593, - "acc,exam_id__2010": 0.18803418803418803, - "acc,exam_id__2016_2": 0.2032520325203252, - "acc,exam_id__2012": 0.1724137931034483 - }, - "faquad_nli": { - "f1_macro,all": 0.4396551724137931, - "acc,all": 0.7846153846153846, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.3333333333333333, - "acc,all": 0.5 - }, - "oab_exams": { - "acc,all": 0.26651480637813213, - "acc,exam_id__2010-01": 0.23529411764705882, - "acc,exam_id__2017-24": 0.275, - "acc,exam_id__2012-09": 0.2987012987012987, - "acc,exam_id__2012-06a": 0.25, - "acc,exam_id__2012-06": 0.225, - "acc,exam_id__2010-02": 0.25, - "acc,exam_id__2016-19": 0.2692307692307692, - "acc,exam_id__2014-14": 0.3125, - "acc,exam_id__2014-13": 0.25, - "acc,exam_id__2015-17": 0.24358974358974358, - "acc,exam_id__2014-15": 0.32051282051282054, - "acc,exam_id__2012-07": 0.3, - "acc,exam_id__2012-08": 0.325, - "acc,exam_id__2015-18": 0.2, - "acc,exam_id__2015-16": 0.2125, - "acc,exam_id__2011-05": 0.25, - "acc,exam_id__2011-03": 0.26262626262626265, - "acc,exam_id__2016-20": 0.2375, - "acc,exam_id__2013-10": 0.3125, - "acc,exam_id__2016-20a": 0.225, - "acc,exam_id__2011-04": 0.2625, - "acc,exam_id__2017-23": 0.2, - "acc,exam_id__2013-11": 0.3625, - "acc,exam_id__2013-12": 0.275, - "acc,exam_id__2018-25": 0.2875, - "acc,exam_id__2017-22": 0.25, - "acc,exam_id__2016-21": 0.3125, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.40205171444631815, - "acc,all": 0.49471210340775557 - }, - "tweetsentbr": { - "f1_macro,all": 0.18457882485126118, - "acc,all": 0.44975124378109455, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.33568542924880684, + "acc,all": 0.5004084967320261, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.04732817202670604, + "mse,all": 2.2953063725490197, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.21279554937413073, + "acc,exam_id__UNICAMP_2023": 0.2558139534883721, + "acc,exam_id__UNICAMP_2024": 0.17777777777777778, + "acc,exam_id__UNICAMP_2019": 0.2, + "acc,exam_id__USP_2019": 0.25, + "acc,exam_id__UNICAMP_2020": 0.21818181818181817, + "acc,exam_id__USP_2022": 0.22448979591836735, + "acc,exam_id__UNICAMP_2021_1": 0.32608695652173914, + "acc,exam_id__UNICAMP_2021_2": 0.23529411764705882, + "acc,exam_id__USP_2024": 0.21951219512195122, + "acc,exam_id__UNICAMP_2022": 0.28205128205128205, + "acc,exam_id__USP_2020": 0.14285714285714285, + "acc,exam_id__USP_2018": 0.16666666666666666, + "acc,exam_id__USP_2021": 0.15384615384615385, + "acc,exam_id__USP_2023": 0.13636363636363635, + "acc,exam_id__UNICAMP_2018": 0.24074074074074073, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.20503848845346395, + "acc,exam_id__2023": 0.17037037037037037, + "acc,exam_id__2009": 0.22608695652173913, + "acc,exam_id__2014": 0.24770642201834864, + "acc,exam_id__2022": 0.19548872180451127, + "acc,exam_id__2015": 0.25210084033613445, + "acc,exam_id__2017": 0.2413793103448276, + "acc,exam_id__2016": 0.19008264462809918, + "acc,exam_id__2011": 0.20512820512820512, + "acc,exam_id__2013": 0.17592592592592593, + "acc,exam_id__2010": 0.18803418803418803, + "acc,exam_id__2016_2": 0.2032520325203252, + "acc,exam_id__2012": 0.1724137931034483 + }, + "faquad_nli": { + "f1_macro,all": 0.4396551724137931, + "acc,all": 0.7846153846153846, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.3333333333333333, + "acc,all": 0.5 + }, + "oab_exams": { + "acc,all": 0.26651480637813213, + "acc,exam_id__2010-01": 0.23529411764705882, + "acc,exam_id__2017-24": 0.275, + "acc,exam_id__2012-09": 0.2987012987012987, + "acc,exam_id__2012-06a": 0.25, + "acc,exam_id__2012-06": 0.225, + "acc,exam_id__2010-02": 0.25, + "acc,exam_id__2016-19": 0.2692307692307692, + "acc,exam_id__2014-14": 0.3125, + "acc,exam_id__2014-13": 0.25, + "acc,exam_id__2015-17": 0.24358974358974358, + "acc,exam_id__2014-15": 0.32051282051282054, + "acc,exam_id__2012-07": 0.3, + "acc,exam_id__2012-08": 0.325, + "acc,exam_id__2015-18": 0.2, + "acc,exam_id__2015-16": 0.2125, + "acc,exam_id__2011-05": 0.25, + "acc,exam_id__2011-03": 0.26262626262626265, + "acc,exam_id__2016-20": 0.2375, + "acc,exam_id__2013-10": 0.3125, + "acc,exam_id__2016-20a": 0.225, + 
"acc,exam_id__2011-04": 0.2625, + "acc,exam_id__2017-23": 0.2, + "acc,exam_id__2013-11": 0.3625, + "acc,exam_id__2013-12": 0.275, + "acc,exam_id__2018-25": 0.2875, + "acc,exam_id__2017-22": 0.25, + "acc,exam_id__2016-21": 0.3125, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.40205171444631815, + "acc,all": 0.49471210340775557 + }, + "tweetsentbr": { + "f1_macro,all": 0.2461050998016816, + "acc,all": 0.44975124378109455, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 2, - "non_truncated": 14148, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "2e286d3fd8cc1b6d600b8fb8aeb587d2eac61060", - "model_dtype": "torch.float16", - "model_memory_footprint": 1501486080, - "model_num_parameters": 747596544, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 2048, - "max_ctx_length": 2016, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 925.4232026143791, - "min_seq_length": 910, - "max_seq_length": 964, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 966.4232026143791, - "min_seq_length": 951, - "max_seq_length": 1005, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1171.817802503477, - "min_seq_length": 905, - "max_seq_length": 1802, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 4, - "mean_seq_length": 1008.4177746675997, - 
"min_seq_length": 830, - "max_seq_length": 2485, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972008397480754 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 969.1338461538462, - "min_seq_length": 937, - "max_seq_length": 1035, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 2, + "non_truncated": 14148, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "2e286d3fd8cc1b6d600b8fb8aeb587d2eac61060", + "model_dtype": "torch.float16", + "model_memory_footprint": 1501486080, + "model_num_parameters": 747596544, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + "max_length": 2048, + "max_ctx_length": 2016, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 868.4407142857143, - "min_seq_length": 853, - "max_seq_length": 1062, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 833.024145785877, - "min_seq_length": 660, - "max_seq_length": 1109, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 925.4232026143791, + "min_seq_length": 910, + "max_seq_length": 964, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 966.4232026143791, + "min_seq_length": 951, + "max_seq_length": 1005, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1171.817802503477, + "min_seq_length": 905, + "max_seq_length": 1802, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 4, + "mean_seq_length": 1008.4177746675997, + "min_seq_length": 830, + "max_seq_length": 2485, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 
3.0, + "mean_effective_fewshot_size": 2.9972008397480754 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 969.1338461538462, + "min_seq_length": 937, + "max_seq_length": 1035, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 868.4407142857143, + "min_seq_length": 853, + "max_seq_length": 1062, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 833.024145785877, + "min_seq_length": 660, + "max_seq_length": 1109, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1220.021151586369, + "min_seq_length": 1193, + "max_seq_length": 1256, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1155.4194029850746, + "min_seq_length": 1138, + "max_seq_length": 1212, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1220.021151586369, - "min_seq_length": 1193, - "max_seq_length": 1256, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=MulaBR/Mula-8x160-v0.1,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1155.4194029850746, - "min_seq_length": 1138, - "max_seq_length": 1212, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=MulaBR/Mula-8x160-v0.1,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of 
file diff --git a/MulaBR/Mula-8x160-v0.1/results_2024-05-08T21-44-31.351826.json b/MulaBR/Mula-8x160-v0.1/results_2024-05-08T21-44-31.351826.json index ec390644307ceb9c7d47d4fe03294eb90f05a269..4abaaff5241f1dc8a42f80da4e7f2d9d76efa1a7 100644 --- a/MulaBR/Mula-8x160-v0.1/results_2024-05-08T21-44-31.351826.json +++ b/MulaBR/Mula-8x160-v0.1/results_2024-05-08T21-44-31.351826.json @@ -34,29 +34,29 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.25723181638255666, - "all_grouped_npm": -0.13522420754264466, + "all_grouped_average": 0.2765008628307073, + "all_grouped_npm": -0.10018562499405242, "all_grouped": { "enem_challenge": 0.20503848845346395, "bluex": 0.21279554937413073, "oab_exams": 0.26651480637813213, - "assin2_rte": 0.22379028616587124, + "assin2_rte": 0.33568542924880684, "assin2_sts": 0.04732817202670604, "faquad_nli": 0.4396551724137931, "hatebr_offensive": 0.3333333333333333, "portuguese_hate_speech": 0.40205171444631815, - "tweetsentbr": 0.18457882485126118 + "tweetsentbr": 0.2461050998016816 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.20503848845346395, "harness|bluex|bluex|None|3": 0.21279554937413073, "harness|oab_exams|oab_exams|None|3": 0.26651480637813213, - "harness|assin2_rte|assin2_rte|None|15": 0.22379028616587124, + "harness|assin2_rte|assin2_rte|None|15": 0.33568542924880684, "harness|assin2_sts|assin2_sts|None|15": 0.04732817202670604, "harness|faquad_nli|faquad_nli|None|15": 0.4396551724137931, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.3333333333333333, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.40205171444631815, - "harness|tweetsentbr|tweetsentbr|None|25": 0.18457882485126118 + "harness|tweetsentbr|tweetsentbr|None|25": 0.2461050998016816 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.20503848845346395, @@ -125,9 +125,9 @@ "main_score": 0.26651480637813213 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.22379028616587124, + "f1_macro,all": 0.33568542924880684, "acc,all": 0.5004084967320261, - "main_score": 0.22379028616587124 + "main_score": 0.33568542924880684 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.04732817202670604, @@ -150,9 +150,9 @@ "main_score": 0.40205171444631815 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.18457882485126118, + "f1_macro,all": 0.2461050998016816, "acc,all": 0.44975124378109455, - "main_score": 0.18457882485126118 + "main_score": 0.2461050998016816 } }, "config_tasks": { diff --git a/NLPark/AnFeng_v3_Avocet/raw_2024-05-25T07-09-02.650733/results.json b/NLPark/AnFeng_v3_Avocet/raw_2024-05-25T07-09-02.650733/results.json index e4a81dde6c6276311548095770aa6d2b35008034..dab3047d28a6be988d88c4a573c046b2828f623e 100644 --- a/NLPark/AnFeng_v3_Avocet/raw_2024-05-25T07-09-02.650733/results.json +++ b/NLPark/AnFeng_v3_Avocet/raw_2024-05-25T07-09-02.650733/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.4302454973829182, - "acc,all": 0.571078431372549, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.1094044556850774, - "mse,all": 3.008386437908497, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.09596662030598054, - "acc,exam_id__UNICAMP_2021_2": 0.0196078431372549, - "acc,exam_id__UNICAMP_2020": 0.18181818181818182, - "acc,exam_id__USP_2018": 0.1111111111111111, - "acc,exam_id__USP_2024": 0.07317073170731707, - "acc,exam_id__UNICAMP_2022": 0.05128205128205128, - "acc,exam_id__UNICAMP_2023": 0.23255813953488372, - 
"acc,exam_id__USP_2019": 0.05, - "acc,exam_id__USP_2020": 0.05357142857142857, - "acc,exam_id__UNICAMP_2021_1": 0.10869565217391304, - "acc,exam_id__UNICAMP_2018": 0.09259259259259259, - "acc,exam_id__UNICAMP_2024": 0.17777777777777778, - "acc,exam_id__USP_2023": 0.11363636363636363, - "acc,exam_id__UNICAMP_2019": 0.12, - "acc,exam_id__USP_2021": 0.057692307692307696, - "acc,exam_id__USP_2022": 0.0, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.031490552834149754, - "acc,exam_id__2010": 0.042735042735042736, - "acc,exam_id__2015": 0.01680672268907563, - "acc,exam_id__2009": 0.02608695652173913, - "acc,exam_id__2011": 0.017094017094017096, - "acc,exam_id__2022": 0.022556390977443608, - "acc,exam_id__2012": 0.05172413793103448, - "acc,exam_id__2014": 0.045871559633027525, - "acc,exam_id__2017": 0.02586206896551724, - "acc,exam_id__2023": 0.014814814814814815, - "acc,exam_id__2016": 0.03305785123966942, - "acc,exam_id__2016_2": 0.04065040650406504, - "acc,exam_id__2013": 0.046296296296296294 - }, - "faquad_nli": { - "f1_macro,all": 0.0, - "acc,all": 0.0, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.3014632275602253, - "acc,all": 0.3264285714285714 - }, - "oab_exams": { - "acc,all": 0.004100227790432802, - "acc,exam_id__2016-20a": 0.0125, - "acc,exam_id__2015-18": 0.0, - "acc,exam_id__2010-02": 0.0, - "acc,exam_id__2016-19": 0.0, - "acc,exam_id__2012-09": 0.012987012987012988, - "acc,exam_id__2012-07": 0.0125, - "acc,exam_id__2013-11": 0.0125, - "acc,exam_id__2017-23": 0.0, - "acc,exam_id__2011-03": 0.0, - "acc,exam_id__2017-22": 0.0, - "acc,exam_id__2012-08": 0.0125, - "acc,exam_id__2014-13": 0.0, - "acc,exam_id__2012-06a": 0.0, - "acc,exam_id__2017-24": 0.0, - "acc,exam_id__2015-17": 0.0, - "acc,exam_id__2018-25": 0.0125, - "acc,exam_id__2016-20": 0.0, - "acc,exam_id__2014-14": 0.0125, - "acc,exam_id__2012-06": 0.0, - "acc,exam_id__2013-12": 0.0, - "acc,exam_id__2011-04": 0.0, - "acc,exam_id__2013-10": 0.0125, - "acc,exam_id__2015-16": 0.0, - "acc,exam_id__2014-15": 0.0, - "acc,exam_id__2016-21": 0.0, - "acc,exam_id__2010-01": 0.011764705882352941, - "acc,exam_id__2011-05": 0.0, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.309880948022061, - "acc,all": 0.3807285546415981 - }, - "tweetsentbr": { - "f1_macro,all": 0.21382013119566504, - "acc,all": 0.2253731343283582, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.6453682460743775, + "acc,all": 0.571078431372549, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.1094044556850774, + "mse,all": 3.008386437908497, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.09596662030598054, + "acc,exam_id__UNICAMP_2021_2": 0.0196078431372549, + "acc,exam_id__UNICAMP_2020": 0.18181818181818182, + "acc,exam_id__USP_2018": 0.1111111111111111, + "acc,exam_id__USP_2024": 0.07317073170731707, + "acc,exam_id__UNICAMP_2022": 0.05128205128205128, + "acc,exam_id__UNICAMP_2023": 0.23255813953488372, + "acc,exam_id__USP_2019": 0.05, + "acc,exam_id__USP_2020": 0.05357142857142857, + "acc,exam_id__UNICAMP_2021_1": 0.10869565217391304, + "acc,exam_id__UNICAMP_2018": 0.09259259259259259, + "acc,exam_id__UNICAMP_2024": 0.17777777777777778, + "acc,exam_id__USP_2023": 0.11363636363636363, + "acc,exam_id__UNICAMP_2019": 0.12, + "acc,exam_id__USP_2021": 0.057692307692307696, + "acc,exam_id__USP_2022": 0.0, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.031490552834149754, + "acc,exam_id__2010": 0.042735042735042736, + "acc,exam_id__2015": 0.01680672268907563, + "acc,exam_id__2009": 0.02608695652173913, + "acc,exam_id__2011": 0.017094017094017096, + "acc,exam_id__2022": 0.022556390977443608, + "acc,exam_id__2012": 0.05172413793103448, + "acc,exam_id__2014": 0.045871559633027525, + "acc,exam_id__2017": 0.02586206896551724, + "acc,exam_id__2023": 0.014814814814814815, + "acc,exam_id__2016": 0.03305785123966942, + "acc,exam_id__2016_2": 0.04065040650406504, + "acc,exam_id__2013": 0.046296296296296294 + }, + "faquad_nli": { + "f1_macro,all": 0.0, + "acc,all": 0.0, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.4521948413403378, + "acc,all": 0.3264285714285714 + }, + "oab_exams": { + "acc,all": 0.004100227790432802, + "acc,exam_id__2016-20a": 0.0125, + "acc,exam_id__2015-18": 0.0, + "acc,exam_id__2010-02": 0.0, + "acc,exam_id__2016-19": 0.0, + "acc,exam_id__2012-09": 0.012987012987012988, + "acc,exam_id__2012-07": 0.0125, + "acc,exam_id__2013-11": 0.0125, + "acc,exam_id__2017-23": 0.0, + "acc,exam_id__2011-03": 0.0, + "acc,exam_id__2017-22": 0.0, + "acc,exam_id__2012-08": 0.0125, + "acc,exam_id__2014-13": 0.0, + "acc,exam_id__2012-06a": 0.0, + "acc,exam_id__2017-24": 0.0, + "acc,exam_id__2015-17": 0.0, + "acc,exam_id__2018-25": 0.0125, + "acc,exam_id__2016-20": 0.0, + "acc,exam_id__2014-14": 0.0125, + "acc,exam_id__2012-06": 0.0, + "acc,exam_id__2013-12": 0.0, + "acc,exam_id__2011-04": 0.0, + "acc,exam_id__2013-10": 0.0125, + "acc,exam_id__2015-16": 0.0, + "acc,exam_id__2014-15": 
0.0, + "acc,exam_id__2016-21": 0.0, + "acc,exam_id__2010-01": 0.011764705882352941, + "acc,exam_id__2011-05": 0.0, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.4648214220330915, + "acc,all": 0.3807285546415981 + }, + "tweetsentbr": { + "f1_macro,all": 0.2850935082608867, + "acc,all": 0.2253731343283582, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "e490fec19a769104bcb2b7f7a6400c9a0a49be4b", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 69961672704, - "model_num_parameters": 34980831232, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 1, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1099.3545751633987, - "min_seq_length": 1083, - "max_seq_length": 1143, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1344.3545751633987, - "min_seq_length": 1328, - "max_seq_length": 1388, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1368.8970792767732, - "min_seq_length": 1089, - "max_seq_length": 1962, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1230.5381385584324, - "min_seq_length": 1040, - "max_seq_length": 2219, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1199.0676923076924, - "min_seq_length": 1163, - "max_seq_length": 1266, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "e490fec19a769104bcb2b7f7a6400c9a0a49be4b", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 69961672704, + "model_num_parameters": 34980831232, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 1, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1196.9871428571428, - "min_seq_length": 1180, - "max_seq_length": 1384, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 983.0168564920274, - "min_seq_length": 797, - "max_seq_length": 1304, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1099.3545751633987, + "min_seq_length": 1083, + "max_seq_length": 1143, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1344.3545751633987, + "min_seq_length": 1328, + "max_seq_length": 1388, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1368.8970792767732, + "min_seq_length": 1089, + "max_seq_length": 1962, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1230.5381385584324, + "min_seq_length": 1040, + "max_seq_length": 2219, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1199.0676923076924, + "min_seq_length": 1163, + "max_seq_length": 1266, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1196.9871428571428, + "min_seq_length": 1180, + "max_seq_length": 1384, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 983.0168564920274, + "min_seq_length": 797, + "max_seq_length": 1304, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1555.6169212690952, + "min_seq_length": 1528, + "max_seq_length": 1592, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1398.0830845771145, + "min_seq_length": 1382, + "max_seq_length": 1448, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1555.6169212690952, - "min_seq_length": 1528, - "max_seq_length": 1592, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=NLPark/AnFeng_v3_Avocet,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1398.0830845771145, - "min_seq_length": 1382, - "max_seq_length": 1448, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=NLPark/AnFeng_v3_Avocet,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "51e0e5e" 
+ "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/NLPark/AnFeng_v3_Avocet/results_2024-05-25T07-09-02.650733.json b/NLPark/AnFeng_v3_Avocet/results_2024-05-25T07-09-02.650733.json index 4f3e6bf32ee16d889803e7858694de337aea68bb..0700600e448807837af2467e60b55b969e3f2cc7 100644 --- a/NLPark/AnFeng_v3_Avocet/results_2024-05-25T07-09-02.650733.json +++ b/NLPark/AnFeng_v3_Avocet/results_2024-05-25T07-09-02.650733.json @@ -34,29 +34,29 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.1662635178640567, - "all_grouped_npm": -0.2738807188079487, + "all_grouped_average": 0.23204887492492599, + "all_grouped_npm": -0.1477517362431038, "all_grouped": { "enem_challenge": 0.031490552834149754, "bluex": 0.09596662030598054, "oab_exams": 0.004100227790432802, - "assin2_rte": 0.4302454973829182, + "assin2_rte": 0.6453682460743775, "assin2_sts": 0.1094044556850774, "faquad_nli": 0.0, - "hatebr_offensive": 0.3014632275602253, - "portuguese_hate_speech": 0.309880948022061, - "tweetsentbr": 0.21382013119566504 + "hatebr_offensive": 0.4521948413403378, + "portuguese_hate_speech": 0.4648214220330915, + "tweetsentbr": 0.2850935082608867 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.031490552834149754, "harness|bluex|bluex|None|3": 0.09596662030598054, "harness|oab_exams|oab_exams|None|3": 0.004100227790432802, - "harness|assin2_rte|assin2_rte|None|15": 0.4302454973829182, + "harness|assin2_rte|assin2_rte|None|15": 0.6453682460743775, "harness|assin2_sts|assin2_sts|None|15": 0.1094044556850774, "harness|faquad_nli|faquad_nli|None|15": 0.0, - "harness|hatebr_offensive|hatebr_offensive|None|25": 0.3014632275602253, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.309880948022061, - "harness|tweetsentbr|tweetsentbr|None|25": 0.21382013119566504 + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.4521948413403378, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.4648214220330915, + "harness|tweetsentbr|tweetsentbr|None|25": 0.2850935082608867 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.031490552834149754, @@ -125,9 +125,9 @@ "main_score": 0.004100227790432802 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.4302454973829182, + "f1_macro,all": 0.6453682460743775, "acc,all": 0.571078431372549, - "main_score": 0.4302454973829182 + "main_score": 0.6453682460743775 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.1094044556850774, @@ -140,19 +140,19 @@ "main_score": 0.0 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.3014632275602253, + "f1_macro,all": 0.4521948413403378, "acc,all": 0.3264285714285714, - "main_score": 0.3014632275602253 + "main_score": 0.4521948413403378 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.309880948022061, + "f1_macro,all": 0.4648214220330915, "acc,all": 0.3807285546415981, - "main_score": 0.309880948022061 + "main_score": 0.4648214220330915 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.21382013119566504, + "f1_macro,all": 0.2850935082608867, "acc,all": 0.2253731343283582, - "main_score": 0.21382013119566504 + "main_score": 0.2850935082608867 } }, "config_tasks": { diff --git a/NOVA-vision-language/GlorIA-1.3B/raw_2024-03-07T22-34-35.217921/results.json b/NOVA-vision-language/GlorIA-1.3B/raw_2024-03-07T22-34-35.217921/results.json index 1665b22ce69402a967f6568af9b726b0c1728cce..314738f7d601d77853455394c58c97b94fe3f016 100644 --- 
a/NOVA-vision-language/GlorIA-1.3B/raw_2024-03-07T22-34-35.217921/results.json +++ b/NOVA-vision-language/GlorIA-1.3B/raw_2024-03-07T22-34-35.217921/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.0, - "acc,all": 0.0, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.023212602251989234, - "mse,all": 3.1345302325319917, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.031988873435326845, - "acc,exam_id__USP_2024": 0.04878048780487805, - "acc,exam_id__USP_2022": 0.0, - "acc,exam_id__USP_2023": 0.022727272727272728, - "acc,exam_id__USP_2020": 0.017857142857142856, - "acc,exam_id__UNICAMP_2020": 0.0, - "acc,exam_id__UNICAMP_2021_1": 0.043478260869565216, - "acc,exam_id__UNICAMP_2019": 0.06, - "acc,exam_id__UNICAMP_2023": 0.06976744186046512, - "acc,exam_id__USP_2019": 0.05, - "acc,exam_id__UNICAMP_2024": 0.044444444444444446, - "acc,exam_id__UNICAMP_2022": 0.0, - "acc,exam_id__USP_2021": 0.019230769230769232, - "acc,exam_id__USP_2018": 0.09259259259259259, - "acc,exam_id__UNICAMP_2021_2": 0.0196078431372549, - "acc,exam_id__UNICAMP_2018": 0.0, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.018894331700489854, - "acc,exam_id__2012": 0.0, - "acc,exam_id__2009": 0.02608695652173913, - "acc,exam_id__2014": 0.0, - "acc,exam_id__2016_2": 0.032520325203252036, - "acc,exam_id__2010": 0.008547008547008548, - "acc,exam_id__2011": 0.008547008547008548, - "acc,exam_id__2013": 0.027777777777777776, - "acc,exam_id__2016": 0.024793388429752067, - "acc,exam_id__2023": 0.014814814814814815, - "acc,exam_id__2017": 0.034482758620689655, - "acc,exam_id__2022": 0.0, - "acc,exam_id__2015": 0.05042016806722689 - }, - "faquad_nli": { - "f1_macro,all": 0.0026041666666666665, - "acc,all": 0.003076923076923077, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.0028436222959357994, - "acc,all": 0.002142857142857143 - }, - "oab_exams": { - "acc,all": 0.05193621867881549, - "acc,exam_id__2017-24": 0.0375, - "acc,exam_id__2012-06": 0.0875, - "acc,exam_id__2012-06a": 0.0625, - "acc,exam_id__2015-17": 0.01282051282051282, - "acc,exam_id__2011-04": 0.075, - "acc,exam_id__2017-23": 0.0625, - "acc,exam_id__2016-20a": 0.025, - "acc,exam_id__2011-03": 0.09090909090909091, - "acc,exam_id__2016-19": 0.0, - "acc,exam_id__2014-13": 0.075, - "acc,exam_id__2016-20": 0.0125, - "acc,exam_id__2013-10": 0.075, - "acc,exam_id__2015-16": 0.025, - "acc,exam_id__2013-11": 0.0625, - "acc,exam_id__2015-18": 0.05, - "acc,exam_id__2013-12": 0.0875, - "acc,exam_id__2014-15": 0.02564102564102564, - "acc,exam_id__2012-08": 0.05, - "acc,exam_id__2017-22": 0.05, - "acc,exam_id__2011-05": 0.0625, - "acc,exam_id__2012-07": 0.05, - "acc,exam_id__2012-09": 0.11688311688311688, - "acc,exam_id__2018-25": 0.025, - "acc,exam_id__2010-01": 0.047058823529411764, - "acc,exam_id__2014-14": 0.0375, - "acc,exam_id__2016-21": 0.025, - "acc,exam_id__2010-02": 0.06, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.23522853957636566, - "acc,all": 0.49588719153936545 - }, - "tweetsentbr": { - "f1_macro,all": 0.0018832391713747645, - "acc,all": 0.0009950248756218905, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: 
{{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.0, + "acc,all": 0.0, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.023212602251989234, + "mse,all": 3.1345302325319917, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.031988873435326845, + "acc,exam_id__USP_2024": 0.04878048780487805, + "acc,exam_id__USP_2022": 0.0, + "acc,exam_id__USP_2023": 0.022727272727272728, + "acc,exam_id__USP_2020": 0.017857142857142856, + "acc,exam_id__UNICAMP_2020": 0.0, + "acc,exam_id__UNICAMP_2021_1": 0.043478260869565216, + "acc,exam_id__UNICAMP_2019": 0.06, + "acc,exam_id__UNICAMP_2023": 0.06976744186046512, + "acc,exam_id__USP_2019": 0.05, + "acc,exam_id__UNICAMP_2024": 0.044444444444444446, + "acc,exam_id__UNICAMP_2022": 0.0, + "acc,exam_id__USP_2021": 0.019230769230769232, + "acc,exam_id__USP_2018": 0.09259259259259259, + "acc,exam_id__UNICAMP_2021_2": 0.0196078431372549, + "acc,exam_id__UNICAMP_2018": 0.0, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.018894331700489854, + "acc,exam_id__2012": 0.0, + "acc,exam_id__2009": 0.02608695652173913, + "acc,exam_id__2014": 0.0, + "acc,exam_id__2016_2": 0.032520325203252036, + "acc,exam_id__2010": 0.008547008547008548, + "acc,exam_id__2011": 0.008547008547008548, + "acc,exam_id__2013": 0.027777777777777776, + "acc,exam_id__2016": 0.024793388429752067, + "acc,exam_id__2023": 0.014814814814814815, + "acc,exam_id__2017": 0.034482758620689655, + "acc,exam_id__2022": 0.0, + "acc,exam_id__2015": 0.05042016806722689 + }, + "faquad_nli": { + "f1_macro,all": 0.00390625, + "acc,all": 0.003076923076923077, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.004265433443903698, + "acc,all": 0.002142857142857143 + }, + "oab_exams": { + "acc,all": 0.05193621867881549, + "acc,exam_id__2017-24": 0.0375, + "acc,exam_id__2012-06": 0.0875, + "acc,exam_id__2012-06a": 0.0625, + "acc,exam_id__2015-17": 0.01282051282051282, + "acc,exam_id__2011-04": 0.075, + "acc,exam_id__2017-23": 0.0625, + "acc,exam_id__2016-20a": 0.025, + "acc,exam_id__2011-03": 0.09090909090909091, + "acc,exam_id__2016-19": 0.0, + "acc,exam_id__2014-13": 0.075, + "acc,exam_id__2016-20": 0.0125, + "acc,exam_id__2013-10": 0.075, + "acc,exam_id__2015-16": 0.025, + "acc,exam_id__2013-11": 0.0625, + "acc,exam_id__2015-18": 0.05, + "acc,exam_id__2013-12": 0.0875, + "acc,exam_id__2014-15": 0.02564102564102564, + "acc,exam_id__2012-08": 0.05, + 
"acc,exam_id__2017-22": 0.05, + "acc,exam_id__2011-05": 0.0625, + "acc,exam_id__2012-07": 0.05, + "acc,exam_id__2012-09": 0.11688311688311688, + "acc,exam_id__2018-25": 0.025, + "acc,exam_id__2010-01": 0.047058823529411764, + "acc,exam_id__2014-14": 0.0375, + "acc,exam_id__2016-21": 0.025, + "acc,exam_id__2010-02": 0.06, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.35284280936454854, + "acc,all": 0.49588719153936545 + }, + "tweetsentbr": { + "f1_macro,all": 0.002510985561833019, + "acc,all": 0.0009950248756218905, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 7, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "a95ffb65726dda96fc05ce00de24feac1ad1df61", - 
"model_dtype": "torch.float16", - "model_memory_footprint": 2731819056, - "model_num_parameters": 1315577856, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2048, - "max_ctx_length": 2016, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 952.5093954248366, - "min_seq_length": 938, - "max_seq_length": 994, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1146.5093954248366, - "min_seq_length": 1132, - "max_seq_length": 1188, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1310.018080667594, - "min_seq_length": 1028, - "max_seq_length": 2004, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 4, - "non_truncated": 1425, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 7, - "mean_seq_length": 1116.8691392582225, - "min_seq_length": 935, - "max_seq_length": 2884, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9951014695591325 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1021.8707692307693, - "min_seq_length": 988, - "max_seq_length": 1089, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1011.1157142857143, - "min_seq_length": 992, - "max_seq_length": 1223, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 7, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "a95ffb65726dda96fc05ce00de24feac1ad1df61", + "model_dtype": "torch.float16", + "model_memory_footprint": 2731819056, + "model_num_parameters": 1315577856, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + 
"max_length": 2048, + "max_ctx_length": 2016, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 887.1580865603645, - "min_seq_length": 718, - "max_seq_length": 1156, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 952.5093954248366, + "min_seq_length": 938, + "max_seq_length": 994, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1146.5093954248366, + "min_seq_length": 1132, + "max_seq_length": 1188, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1310.018080667594, + "min_seq_length": 1028, + "max_seq_length": 2004, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 4, + "non_truncated": 1425, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 7, + "mean_seq_length": 1116.8691392582225, + "min_seq_length": 935, + "max_seq_length": 2884, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9951014695591325 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1021.8707692307693, + "min_seq_length": 988, + "max_seq_length": 1089, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1011.1157142857143, + "min_seq_length": 992, + "max_seq_length": 1223, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 887.1580865603645, + "min_seq_length": 718, + "max_seq_length": 1156, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1341.930669800235, + "min_seq_length": 1314, + "max_seq_length": 1390, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + 
"fewshots_truncated": 0, + "mean_seq_length": 1312.0273631840796, + "min_seq_length": 1294, + "max_seq_length": 1435, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1341.930669800235, - "min_seq_length": 1314, - "max_seq_length": 1390, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=NOVA-vision-language/GlorIA-1.3B,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1312.0273631840796, - "min_seq_length": 1294, - "max_seq_length": 1435, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=NOVA-vision-language/GlorIA-1.3B,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": null + "git_hash": null } \ No newline at end of file diff --git a/NOVA-vision-language/GlorIA-1.3B/results_2024-03-07T22-34-35.217921.json b/NOVA-vision-language/GlorIA-1.3B/results_2024-03-07T22-34-35.217921.json index 464c7418a03289895a08c1ac21fb3e686ae46ddc..ad5d13595172cdd009c2b9941dfc9c2214e956ae 100644 --- a/NOVA-vision-language/GlorIA-1.3B/results_2024-03-07T22-34-35.217921.json +++ b/NOVA-vision-language/GlorIA-1.3B/results_2024-03-07T22-34-35.217921.json @@ -34,18 +34,18 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.04095462153077381, - "all_grouped_npm": -0.4996940439828647, + "all_grouped_average": 0.054395278270767414, + "all_grouped_npm": -0.47392532585486635, "all_grouped": { "enem_challenge": 0.018894331700489854, "bluex": 0.031988873435326845, "oab_exams": 0.05193621867881549, "assin2_rte": 0.0, "assin2_sts": 0.023212602251989234, - "faquad_nli": 0.0026041666666666665, - "hatebr_offensive": 0.0028436222959357994, - "portuguese_hate_speech": 0.23522853957636566, - "tweetsentbr": 0.0018832391713747645 + "faquad_nli": 0.00390625, + "hatebr_offensive": 0.004265433443903698, + "portuguese_hate_speech": 0.35284280936454854, + "tweetsentbr": 0.002510985561833019 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.018894331700489854, @@ -53,10 +53,10 @@ "harness|oab_exams|oab_exams|None|3": 0.05193621867881549, "harness|assin2_rte|assin2_rte|None|15": 0.0, "harness|assin2_sts|assin2_sts|None|15": 0.023212602251989234, - "harness|faquad_nli|faquad_nli|None|15": 0.0026041666666666665, - "harness|hatebr_offensive|hatebr_offensive|None|25": 0.0028436222959357994, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 
0.23522853957636566, - "harness|tweetsentbr|tweetsentbr|None|25": 0.0018832391713747645 + "harness|faquad_nli|faquad_nli|None|15": 0.00390625, + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.004265433443903698, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.35284280936454854, + "harness|tweetsentbr|tweetsentbr|None|25": 0.002510985561833019 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.018894331700489854, @@ -135,24 +135,24 @@ "main_score": 0.023212602251989234 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.0026041666666666665, + "f1_macro,all": 0.00390625, "acc,all": 0.003076923076923077, - "main_score": 0.0026041666666666665 + "main_score": 0.00390625 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.0028436222959357994, + "f1_macro,all": 0.004265433443903698, "acc,all": 0.002142857142857143, - "main_score": 0.0028436222959357994 + "main_score": 0.004265433443903698 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.23522853957636566, + "f1_macro,all": 0.35284280936454854, "acc,all": 0.49588719153936545, - "main_score": 0.23522853957636566 + "main_score": 0.35284280936454854 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.0018832391713747645, + "f1_macro,all": 0.002510985561833019, "acc,all": 0.0009950248756218905, - "main_score": 0.0018832391713747645 + "main_score": 0.002510985561833019 } }, "config_tasks": { diff --git a/Nexusflow/Starling-LM-7B-beta/raw_2024-04-01T21-15-54.379246/results.json b/Nexusflow/Starling-LM-7B-beta/raw_2024-04-01T21-15-54.379246/results.json index 747fad215310fdfa7b744a5960ee3dda8736f1de..03bc213162af34be57b022720e5e8aea7a178232 100644 --- a/Nexusflow/Starling-LM-7B-beta/raw_2024-04-01T21-15-54.379246/results.json +++ b/Nexusflow/Starling-LM-7B-beta/raw_2024-04-01T21-15-54.379246/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9256528007689433, - "acc,all": 0.9256535947712419, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.8246749931266709, - "mse,all": 0.37589460784313716, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5382475660639777, - "acc,exam_id__USP_2021": 0.5192307692307693, - "acc,exam_id__USP_2018": 0.5, - "acc,exam_id__UNICAMP_2023": 0.5116279069767442, - "acc,exam_id__USP_2024": 0.7073170731707317, - "acc,exam_id__USP_2022": 0.5918367346938775, - "acc,exam_id__UNICAMP_2019": 0.56, - "acc,exam_id__USP_2020": 0.5892857142857143, - "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, - "acc,exam_id__USP_2019": 0.425, - "acc,exam_id__UNICAMP_2022": 0.6153846153846154, - "acc,exam_id__UNICAMP_2024": 0.4888888888888889, - "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, - "acc,exam_id__UNICAMP_2018": 0.48148148148148145, - "acc,exam_id__USP_2023": 0.5681818181818182, - "acc,exam_id__UNICAMP_2020": 0.5454545454545454, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6466060181945417, - "acc,exam_id__2013": 0.6574074074074074, - "acc,exam_id__2012": 0.646551724137931, - "acc,exam_id__2015": 0.6218487394957983, - "acc,exam_id__2016": 0.6115702479338843, - "acc,exam_id__2009": 0.6, - "acc,exam_id__2023": 0.674074074074074, - "acc,exam_id__2016_2": 0.6585365853658537, - "acc,exam_id__2010": 0.6410256410256411, - "acc,exam_id__2014": 0.6330275229357798, - "acc,exam_id__2022": 0.631578947368421, - "acc,exam_id__2011": 0.7094017094017094, - "acc,exam_id__2017": 0.6724137931034483 - }, - "faquad_nli": { - 
"f1_macro,all": 0.7748688218404758, - "acc,all": 0.8353846153846154, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8311091883257347, - "acc,all": 0.8342857142857143 - }, - "oab_exams": { - "acc,all": 0.4542141230068337, - "acc,exam_id__2011-03": 0.3838383838383838, - "acc,exam_id__2011-04": 0.3625, - "acc,exam_id__2011-05": 0.4375, - "acc,exam_id__2016-19": 0.5256410256410257, - "acc,exam_id__2017-23": 0.4125, - "acc,exam_id__2018-25": 0.45, - "acc,exam_id__2012-09": 0.4155844155844156, - "acc,exam_id__2017-24": 0.35, - "acc,exam_id__2014-14": 0.575, - "acc,exam_id__2015-17": 0.5769230769230769, - "acc,exam_id__2012-07": 0.3875, - "acc,exam_id__2016-20": 0.475, - "acc,exam_id__2013-11": 0.4625, - "acc,exam_id__2016-21": 0.45, - "acc,exam_id__2012-06a": 0.5125, - "acc,exam_id__2015-18": 0.5125, - "acc,exam_id__2012-08": 0.425, - "acc,exam_id__2013-12": 0.5125, - "acc,exam_id__2012-06": 0.4875, - "acc,exam_id__2015-16": 0.4375, - "acc,exam_id__2013-10": 0.475, - "acc,exam_id__2014-13": 0.375, - "acc,exam_id__2010-02": 0.44, - "acc,exam_id__2014-15": 0.5384615384615384, - "acc,exam_id__2016-20a": 0.3625, - "acc,exam_id__2010-01": 0.4235294117647059, - "acc,exam_id__2017-22": 0.525, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7137054053375511, - "acc,all": 0.763807285546416 - }, - "tweetsentbr": { - "f1_macro,all": 0.5036962690569555, - "acc,all": 0.7054726368159204, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9256528007689433, + "acc,all": 0.9256535947712419, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.8246749931266709, + "mse,all": 0.37589460784313716, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5382475660639777, + "acc,exam_id__USP_2021": 0.5192307692307693, + "acc,exam_id__USP_2018": 0.5, + "acc,exam_id__UNICAMP_2023": 0.5116279069767442, + "acc,exam_id__USP_2024": 0.7073170731707317, + "acc,exam_id__USP_2022": 0.5918367346938775, + "acc,exam_id__UNICAMP_2019": 0.56, + "acc,exam_id__USP_2020": 0.5892857142857143, + "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, + "acc,exam_id__USP_2019": 0.425, + "acc,exam_id__UNICAMP_2022": 0.6153846153846154, + "acc,exam_id__UNICAMP_2024": 0.4888888888888889, + "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, + "acc,exam_id__UNICAMP_2018": 0.48148148148148145, + "acc,exam_id__USP_2023": 0.5681818181818182, + "acc,exam_id__UNICAMP_2020": 0.5454545454545454, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6466060181945417, + "acc,exam_id__2013": 0.6574074074074074, + "acc,exam_id__2012": 0.646551724137931, + "acc,exam_id__2015": 0.6218487394957983, + "acc,exam_id__2016": 0.6115702479338843, + "acc,exam_id__2009": 0.6, + "acc,exam_id__2023": 0.674074074074074, + "acc,exam_id__2016_2": 0.6585365853658537, + "acc,exam_id__2010": 0.6410256410256411, + "acc,exam_id__2014": 0.6330275229357798, + "acc,exam_id__2022": 0.631578947368421, + "acc,exam_id__2011": 0.7094017094017094, + "acc,exam_id__2017": 0.6724137931034483 + }, + "faquad_nli": { + "f1_macro,all": 0.7748688218404758, + "acc,all": 0.8353846153846154, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8311091883257347, + "acc,all": 0.8342857142857143 + }, + "oab_exams": { + "acc,all": 0.4542141230068337, + "acc,exam_id__2011-03": 0.3838383838383838, + "acc,exam_id__2011-04": 0.3625, + "acc,exam_id__2011-05": 0.4375, + "acc,exam_id__2016-19": 0.5256410256410257, + "acc,exam_id__2017-23": 0.4125, + "acc,exam_id__2018-25": 0.45, + "acc,exam_id__2012-09": 0.4155844155844156, + "acc,exam_id__2017-24": 0.35, + "acc,exam_id__2014-14": 0.575, + "acc,exam_id__2015-17": 0.5769230769230769, + "acc,exam_id__2012-07": 0.3875, + "acc,exam_id__2016-20": 0.475, + "acc,exam_id__2013-11": 0.4625, + "acc,exam_id__2016-21": 0.45, + "acc,exam_id__2012-06a": 0.5125, + "acc,exam_id__2015-18": 0.5125, + "acc,exam_id__2012-08": 0.425, + "acc,exam_id__2013-12": 0.5125, + "acc,exam_id__2012-06": 0.4875, + "acc,exam_id__2015-16": 0.4375, + "acc,exam_id__2013-10": 0.475, + "acc,exam_id__2014-13": 0.375, + 
"acc,exam_id__2010-02": 0.44, + "acc,exam_id__2014-15": 0.5384615384615384, + "acc,exam_id__2016-20a": 0.3625, + "acc,exam_id__2010-01": 0.4235294117647059, + "acc,exam_id__2017-22": 0.525, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7137054053375511, + "acc,all": 0.763807285546416 + }, + "tweetsentbr": { + "f1_macro,all": 0.6715950254092741, + "acc,all": 0.7054726368159204, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"aa21e7f117d41f9463b6d48d2e127bbf2e93256d", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 14617722880, - "model_num_parameters": 7241748480, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1584.7455065359477, - "min_seq_length": 1561, - "max_seq_length": 1651, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1824.7455065359477, - "min_seq_length": 1801, - "max_seq_length": 1891, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1782.9262865090404, - "min_seq_length": 1406, - "max_seq_length": 2583, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - "mean_seq_length": 1683.039188243527, - "min_seq_length": 1417, - "max_seq_length": 2681, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1825.9876923076922, - "min_seq_length": 1770, - "max_seq_length": 1946, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1676.3878571428572, - "min_seq_length": 1653, - "max_seq_length": 1927, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "aa21e7f117d41f9463b6d48d2e127bbf2e93256d", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 14617722880, + "model_num_parameters": 7241748480, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": 
null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1428.764464692483, - "min_seq_length": 1162, - "max_seq_length": 1931, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1584.7455065359477, + "min_seq_length": 1561, + "max_seq_length": 1651, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1824.7455065359477, + "min_seq_length": 1801, + "max_seq_length": 1891, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1782.9262865090404, + "min_seq_length": 1406, + "max_seq_length": 2583, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1683.039188243527, + "min_seq_length": 1417, + "max_seq_length": 2681, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1825.9876923076922, + "min_seq_length": 1770, + "max_seq_length": 1946, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1676.3878571428572, + "min_seq_length": 1653, + "max_seq_length": 1927, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1428.764464692483, + "min_seq_length": 1162, + "max_seq_length": 1931, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2177.3360752056406, + "min_seq_length": 2142, + "max_seq_length": 2216, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": 
{ + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1923.2492537313433, + "min_seq_length": 1902, + "max_seq_length": 2018, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2177.3360752056406, - "min_seq_length": 2142, - "max_seq_length": 2216, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Nexusflow/Starling-LM-7B-beta,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1923.2492537313433, - "min_seq_length": 1902, - "max_seq_length": 2018, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Nexusflow/Starling-LM-7B-beta,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": null + "git_hash": null } \ No newline at end of file diff --git a/Nexusflow/Starling-LM-7B-beta/results_2024-04-01T21-15-54.379246.json b/Nexusflow/Starling-LM-7B-beta/results_2024-04-01T21-15-54.379246.json index e35a5e21c114297a776a86191c01b9ce8c04ec6b..d53a01eeae78a4cab2ae70e7397d97d06e3e1bd1 100644 --- a/Nexusflow/Starling-LM-7B-beta/results_2024-04-01T21-15-54.379246.json +++ b/Nexusflow/Starling-LM-7B-beta/results_2024-04-01T21-15-54.379246.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6903083539690762, - "all_grouped_npm": 0.541225702715447, + "all_grouped_average": 0.708963771341556, + "all_grouped_npm": 0.5689867404721135, "all_grouped": { "enem_challenge": 0.6466060181945417, "bluex": 0.5382475660639777, @@ -45,7 +45,7 @@ "faquad_nli": 0.7748688218404758, "hatebr_offensive": 0.8311091883257347, "portuguese_hate_speech": 0.7137054053375511, - "tweetsentbr": 0.5036962690569555 + "tweetsentbr": 0.6715950254092741 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6466060181945417, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7748688218404758, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8311091883257347, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7137054053375511, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5036962690569555 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6715950254092741 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6466060181945417, @@ -150,9 +150,9 @@ "main_score": 0.7137054053375511 }, "harness|tweetsentbr|tweetsentbr|None|25": 
{ - "f1_macro,all": 0.5036962690569555, + "f1_macro,all": 0.6715950254092741, "acc,all": 0.7054726368159204, - "main_score": 0.5036962690569555 + "main_score": 0.6715950254092741 } }, "config_tasks": { diff --git a/NousResearch/Nous-Hermes-2-Mistral-7B-DPO/raw_2024-02-27T02-51-13.508742/results.json b/NousResearch/Nous-Hermes-2-Mistral-7B-DPO/raw_2024-02-27T02-51-13.508742/results.json index 4295a2debd20c6ca7afda17461e9535fe5da09e5..b59fc9ad4337480be98db23daa6ec9cdcbd12441 100644 --- a/NousResearch/Nous-Hermes-2-Mistral-7B-DPO/raw_2024-02-27T02-51-13.508742/results.json +++ b/NousResearch/Nous-Hermes-2-Mistral-7B-DPO/raw_2024-02-27T02-51-13.508742/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.601464720105945, - "acc,all": 0.9019607843137255, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.6915650379510005, - "mse,all": 0.7275285947712418, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.541029207232267, - "acc,exam_id__USP_2023": 0.6136363636363636, - "acc,exam_id__UNICAMP_2023": 0.5348837209302325, - "acc,exam_id__UNICAMP_2024": 0.6222222222222222, - "acc,exam_id__USP_2021": 0.5576923076923077, - "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, - "acc,exam_id__UNICAMP_2019": 0.54, - "acc,exam_id__UNICAMP_2022": 0.6410256410256411, - "acc,exam_id__UNICAMP_2018": 0.5, - "acc,exam_id__UNICAMP_2020": 0.6, - "acc,exam_id__USP_2020": 0.5357142857142857, - "acc,exam_id__USP_2018": 0.42592592592592593, - "acc,exam_id__USP_2019": 0.425, - "acc,exam_id__UNICAMP_2021_1": 0.5, - "acc,exam_id__USP_2024": 0.6829268292682927, - "acc,exam_id__USP_2022": 0.46938775510204084, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6326102169349195, - "acc,exam_id__2016_2": 0.5772357723577236, - "acc,exam_id__2023": 0.6296296296296297, - "acc,exam_id__2014": 0.6146788990825688, - "acc,exam_id__2017": 0.6724137931034483, - "acc,exam_id__2009": 0.5826086956521739, - "acc,exam_id__2015": 0.5546218487394958, - "acc,exam_id__2016": 0.6363636363636364, - "acc,exam_id__2022": 0.6390977443609023, - "acc,exam_id__2012": 0.6724137931034483, - "acc,exam_id__2013": 0.6574074074074074, - "acc,exam_id__2011": 0.7264957264957265, - "acc,exam_id__2010": 0.6324786324786325 - }, - "faquad_nli": { - "f1_macro,all": 0.7138364779874213, - "acc,all": 0.8276923076923077, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7767581619154933, - "acc,all": 0.785 - }, - "oab_exams": { - "acc,all": 0.43735763097949887, - "acc,exam_id__2012-08": 0.4, - "acc,exam_id__2015-17": 0.5512820512820513, - "acc,exam_id__2012-09": 0.37662337662337664, - "acc,exam_id__2013-11": 0.4125, - "acc,exam_id__2014-13": 0.3875, - "acc,exam_id__2012-06": 0.4375, - "acc,exam_id__2017-24": 0.375, - "acc,exam_id__2010-01": 0.35294117647058826, - "acc,exam_id__2016-20a": 0.4875, - "acc,exam_id__2012-06a": 0.5125, - "acc,exam_id__2017-23": 0.475, - "acc,exam_id__2014-14": 0.55, - "acc,exam_id__2018-25": 0.475, - "acc,exam_id__2013-10": 0.4375, - "acc,exam_id__2011-05": 0.4375, - "acc,exam_id__2017-22": 0.525, - "acc,exam_id__2011-03": 0.32323232323232326, - "acc,exam_id__2016-21": 0.4375, - "acc,exam_id__2015-16": 0.3875, - "acc,exam_id__2011-04": 0.375, - "acc,exam_id__2016-20": 0.4125, - "acc,exam_id__2014-15": 0.5, - "acc,exam_id__2012-07": 0.375, - "acc,exam_id__2016-19": 0.5256410256410257, - "acc,exam_id__2015-18": 0.3625, - "acc,exam_id__2013-12": 0.525, - "acc,exam_id__2010-02": 0.43, - "alias": 
"oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7090851811137625, - "acc,all": 0.7602820211515864 - }, - "tweetsentbr": { - "f1_macro,all": 0.4521585213804042, - "acc,all": 0.664179104477612, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9021970801589174, + "acc,all": 0.9019607843137255, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.6915650379510005, + "mse,all": 0.7275285947712418, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.541029207232267, + "acc,exam_id__USP_2023": 0.6136363636363636, + "acc,exam_id__UNICAMP_2023": 0.5348837209302325, + "acc,exam_id__UNICAMP_2024": 0.6222222222222222, + "acc,exam_id__USP_2021": 0.5576923076923077, + "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, + "acc,exam_id__UNICAMP_2019": 0.54, + "acc,exam_id__UNICAMP_2022": 0.6410256410256411, + "acc,exam_id__UNICAMP_2018": 0.5, + "acc,exam_id__UNICAMP_2020": 0.6, + "acc,exam_id__USP_2020": 0.5357142857142857, + "acc,exam_id__USP_2018": 0.42592592592592593, + "acc,exam_id__USP_2019": 0.425, + "acc,exam_id__UNICAMP_2021_1": 0.5, + "acc,exam_id__USP_2024": 0.6829268292682927, + "acc,exam_id__USP_2022": 0.46938775510204084, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6326102169349195, + "acc,exam_id__2016_2": 0.5772357723577236, + "acc,exam_id__2023": 0.6296296296296297, + "acc,exam_id__2014": 0.6146788990825688, + "acc,exam_id__2017": 0.6724137931034483, + "acc,exam_id__2009": 0.5826086956521739, + "acc,exam_id__2015": 0.5546218487394958, + "acc,exam_id__2016": 0.6363636363636364, + "acc,exam_id__2022": 0.6390977443609023, + "acc,exam_id__2012": 0.6724137931034483, + "acc,exam_id__2013": 0.6574074074074074, + "acc,exam_id__2011": 0.7264957264957265, + "acc,exam_id__2010": 0.6324786324786325 + }, + "faquad_nli": { + "f1_macro,all": 0.7138364779874213, + "acc,all": 0.8276923076923077, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7767581619154933, + "acc,all": 0.785 + }, + "oab_exams": { + "acc,all": 0.43735763097949887, + "acc,exam_id__2012-08": 0.4, + "acc,exam_id__2015-17": 0.5512820512820513, + 
"acc,exam_id__2012-09": 0.37662337662337664, + "acc,exam_id__2013-11": 0.4125, + "acc,exam_id__2014-13": 0.3875, + "acc,exam_id__2012-06": 0.4375, + "acc,exam_id__2017-24": 0.375, + "acc,exam_id__2010-01": 0.35294117647058826, + "acc,exam_id__2016-20a": 0.4875, + "acc,exam_id__2012-06a": 0.5125, + "acc,exam_id__2017-23": 0.475, + "acc,exam_id__2014-14": 0.55, + "acc,exam_id__2018-25": 0.475, + "acc,exam_id__2013-10": 0.4375, + "acc,exam_id__2011-05": 0.4375, + "acc,exam_id__2017-22": 0.525, + "acc,exam_id__2011-03": 0.32323232323232326, + "acc,exam_id__2016-21": 0.4375, + "acc,exam_id__2015-16": 0.3875, + "acc,exam_id__2011-04": 0.375, + "acc,exam_id__2016-20": 0.4125, + "acc,exam_id__2014-15": 0.5, + "acc,exam_id__2012-07": 0.375, + "acc,exam_id__2016-19": 0.5256410256410257, + "acc,exam_id__2015-18": 0.3625, + "acc,exam_id__2013-12": 0.525, + "acc,exam_id__2010-02": 0.43, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7090851811137625, + "acc,all": 0.7602820211515864 + }, + "tweetsentbr": { + "f1_macro,all": 0.6028780285072056, + "acc,all": 0.664179104477612, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"b66d6a6b24c9d59639b12b5df43020059f662aaf", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 15020376064, - "model_num_parameters": 7241748480, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 4096, - "max_ctx_length": 4064, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1531.7455065359477, - "min_seq_length": 1508, - "max_seq_length": 1598, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1740.7455065359477, - "min_seq_length": 1717, - "max_seq_length": 1807, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1761.9262865090404, - "min_seq_length": 1385, - "max_seq_length": 2562, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1662.039188243527, - "min_seq_length": 1396, - "max_seq_length": 2660, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1756.9876923076922, - "min_seq_length": 1701, - "max_seq_length": 1877, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1567.3878571428572, - "min_seq_length": 1544, - "max_seq_length": 1818, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "b66d6a6b24c9d59639b12b5df43020059f662aaf", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 15020376064, + "model_num_parameters": 7241748480, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": 
null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 4096, + "max_ctx_length": 4064, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1407.764464692483, - "min_seq_length": 1141, - "max_seq_length": 1910, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1531.7455065359477, + "min_seq_length": 1508, + "max_seq_length": 1598, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1740.7455065359477, + "min_seq_length": 1717, + "max_seq_length": 1807, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1761.9262865090404, + "min_seq_length": 1385, + "max_seq_length": 2562, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1662.039188243527, + "min_seq_length": 1396, + "max_seq_length": 2660, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1756.9876923076922, + "min_seq_length": 1701, + "max_seq_length": 1877, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1567.3878571428572, + "min_seq_length": 1544, + "max_seq_length": 1818, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1407.764464692483, + "min_seq_length": 1141, + "max_seq_length": 1910, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2068.3360752056406, + "min_seq_length": 2033, + "max_seq_length": 2107, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + 
"non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1814.2492537313433, + "min_seq_length": 1793, + "max_seq_length": 1909, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2068.3360752056406, - "min_seq_length": 2033, - "max_seq_length": 2107, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=NousResearch/Nous-Hermes-2-Mistral-7B-DPO,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1814.2492537313433, - "min_seq_length": 1793, - "max_seq_length": 1909, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=NousResearch/Nous-Hermes-2-Mistral-7B-DPO,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "804df15" + "git_hash": "804df15" } \ No newline at end of file diff --git a/NousResearch/Nous-Hermes-2-Mistral-7B-DPO/results_2024-02-27T02-51-13.508742.json b/NousResearch/Nous-Hermes-2-Mistral-7B-DPO/results_2024-02-27T02-51-13.508742.json index e382b5e45f6ade1c90d62508df28096e72c51e8f..c98ace2e8cdd29da897b6c4144426fc8fdd20b57 100644 --- a/NousResearch/Nous-Hermes-2-Mistral-7B-DPO/results_2024-02-27T02-51-13.508742.json +++ b/NousResearch/Nous-Hermes-2-Mistral-7B-DPO/results_2024-02-27T02-51-13.508742.json @@ -34,29 +34,29 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6173183506223013, - "all_grouped_npm": 0.4163010463383218, + "all_grouped_average": 0.6674796691978317, + "all_grouped_npm": 0.5080510131104773, "all_grouped": { "enem_challenge": 0.6326102169349195, "bluex": 0.541029207232267, "oab_exams": 0.43735763097949887, - "assin2_rte": 0.601464720105945, + "assin2_rte": 0.9021970801589174, "assin2_sts": 0.6915650379510005, "faquad_nli": 0.7138364779874213, "hatebr_offensive": 0.7767581619154933, "portuguese_hate_speech": 0.7090851811137625, - "tweetsentbr": 0.4521585213804042 + "tweetsentbr": 0.6028780285072056 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6326102169349195, "harness|bluex|bluex|None|3": 0.541029207232267, "harness|oab_exams|oab_exams|None|3": 0.43735763097949887, - "harness|assin2_rte|assin2_rte|None|15": 0.601464720105945, + "harness|assin2_rte|assin2_rte|None|15": 0.9021970801589174, "harness|assin2_sts|assin2_sts|None|15": 0.6915650379510005, "harness|faquad_nli|faquad_nli|None|15": 0.7138364779874213, 
"harness|hatebr_offensive|hatebr_offensive|None|25": 0.7767581619154933, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7090851811137625, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4521585213804042 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6028780285072056 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6326102169349195, @@ -125,9 +125,9 @@ "main_score": 0.43735763097949887 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.601464720105945, + "f1_macro,all": 0.9021970801589174, "acc,all": 0.9019607843137255, - "main_score": 0.601464720105945 + "main_score": 0.9021970801589174 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.6915650379510005, @@ -150,9 +150,9 @@ "main_score": 0.7090851811137625 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4521585213804042, + "f1_macro,all": 0.6028780285072056, "acc,all": 0.664179104477612, - "main_score": 0.4521585213804042 + "main_score": 0.6028780285072056 } }, "config_tasks": { diff --git a/OliveiraJLT/Sagui-7B-Instruct-v0.1/raw_2024-07-18T10-05-57.915014/results.json b/OliveiraJLT/Sagui-7B-Instruct-v0.1/raw_2024-07-18T10-05-57.915014/results.json index 9466985d887a856cc24d540e88bbd33fa47701a5..7f59e020cac94fe33e36bfc43155ed53ba5f18da 100644 --- a/OliveiraJLT/Sagui-7B-Instruct-v0.1/raw_2024-07-18T10-05-57.915014/results.json +++ b/OliveiraJLT/Sagui-7B-Instruct-v0.1/raw_2024-07-18T10-05-57.915014/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.7116215451710273, - "acc,all": 0.7312091503267973, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.03155319076723917, - "mse,all": 2.7832107843137264, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.43671766342141866, - "acc,exam_id__USP_2020": 0.42857142857142855, - "acc,exam_id__USP_2018": 0.3148148148148148, - "acc,exam_id__UNICAMP_2021_2": 0.49019607843137253, - "acc,exam_id__UNICAMP_2022": 0.48717948717948717, - "acc,exam_id__UNICAMP_2023": 0.5116279069767442, - "acc,exam_id__UNICAMP_2024": 0.4888888888888889, - "acc,exam_id__UNICAMP_2018": 0.3333333333333333, - "acc,exam_id__USP_2023": 0.5, - "acc,exam_id__USP_2024": 0.4146341463414634, - "acc,exam_id__USP_2021": 0.36538461538461536, - "acc,exam_id__USP_2019": 0.425, - "acc,exam_id__USP_2022": 0.32653061224489793, - "acc,exam_id__UNICAMP_2019": 0.44, - "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, - "acc,exam_id__UNICAMP_2020": 0.5818181818181818, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.5136459062281316, - "acc,exam_id__2012": 0.49137931034482757, - "acc,exam_id__2023": 0.45925925925925926, - "acc,exam_id__2015": 0.5294117647058824, - "acc,exam_id__2016": 0.5041322314049587, - "acc,exam_id__2010": 0.5897435897435898, - "acc,exam_id__2017": 0.47413793103448276, - "acc,exam_id__2009": 0.4260869565217391, - "acc,exam_id__2022": 0.5037593984962406, - "acc,exam_id__2016_2": 0.5528455284552846, - "acc,exam_id__2014": 0.5229357798165137, - "acc,exam_id__2013": 0.5462962962962963, - "acc,exam_id__2011": 0.5726495726495726 - }, - "faquad_nli": { - "f1_macro,all": 0.5805353659391029, - "acc,all": 0.7307692307692307, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.4646385620718362, - "acc,all": 0.5642857142857143 - }, - "oab_exams": { - "acc,all": 0.3621867881548975, - "acc,exam_id__2013-11": 0.5, - "acc,exam_id__2018-25": 0.3875, - "acc,exam_id__2016-21": 0.325, - "acc,exam_id__2012-06a": 0.4125, - 
"acc,exam_id__2015-18": 0.3875, - "acc,exam_id__2014-14": 0.3625, - "acc,exam_id__2016-20": 0.4, - "acc,exam_id__2012-09": 0.3116883116883117, - "acc,exam_id__2017-23": 0.3625, - "acc,exam_id__2012-08": 0.3375, - "acc,exam_id__2017-22": 0.4, - "acc,exam_id__2013-12": 0.375, - "acc,exam_id__2010-01": 0.32941176470588235, - "acc,exam_id__2012-07": 0.4125, - "acc,exam_id__2017-24": 0.35, - "acc,exam_id__2010-02": 0.34, - "acc,exam_id__2014-13": 0.3375, - "acc,exam_id__2013-10": 0.325, - "acc,exam_id__2012-06": 0.375, - "acc,exam_id__2011-03": 0.32323232323232326, - "acc,exam_id__2015-17": 0.358974358974359, - "acc,exam_id__2014-15": 0.4358974358974359, - "acc,exam_id__2011-04": 0.325, - "acc,exam_id__2011-05": 0.375, - "acc,exam_id__2015-16": 0.3125, - "acc,exam_id__2016-19": 0.358974358974359, - "acc,exam_id__2016-20a": 0.275, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.30384738421304563, - "acc,all": 0.4582843713278496 - }, - "tweetsentbr": { - "f1_macro,all": 0.18339913997994203, - "acc,all": 0.31343283582089554, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.7116215451710273, + "acc,all": 0.7312091503267973, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.03155319076723917, + "mse,all": 2.7832107843137264, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.43671766342141866, + "acc,exam_id__USP_2020": 0.42857142857142855, + "acc,exam_id__USP_2018": 0.3148148148148148, + "acc,exam_id__UNICAMP_2021_2": 0.49019607843137253, + "acc,exam_id__UNICAMP_2022": 0.48717948717948717, + "acc,exam_id__UNICAMP_2023": 0.5116279069767442, + "acc,exam_id__UNICAMP_2024": 0.4888888888888889, + "acc,exam_id__UNICAMP_2018": 0.3333333333333333, + "acc,exam_id__USP_2023": 0.5, + "acc,exam_id__USP_2024": 0.4146341463414634, + "acc,exam_id__USP_2021": 0.36538461538461536, + "acc,exam_id__USP_2019": 0.425, + "acc,exam_id__USP_2022": 0.32653061224489793, + "acc,exam_id__UNICAMP_2019": 0.44, + "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, + "acc,exam_id__UNICAMP_2020": 0.5818181818181818, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.5136459062281316, + 
"acc,exam_id__2012": 0.49137931034482757, + "acc,exam_id__2023": 0.45925925925925926, + "acc,exam_id__2015": 0.5294117647058824, + "acc,exam_id__2016": 0.5041322314049587, + "acc,exam_id__2010": 0.5897435897435898, + "acc,exam_id__2017": 0.47413793103448276, + "acc,exam_id__2009": 0.4260869565217391, + "acc,exam_id__2022": 0.5037593984962406, + "acc,exam_id__2016_2": 0.5528455284552846, + "acc,exam_id__2014": 0.5229357798165137, + "acc,exam_id__2013": 0.5462962962962963, + "acc,exam_id__2011": 0.5726495726495726 + }, + "faquad_nli": { + "f1_macro,all": 0.5805353659391029, + "acc,all": 0.7307692307692307, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.4646385620718362, + "acc,all": 0.5642857142857143 + }, + "oab_exams": { + "acc,all": 0.3621867881548975, + "acc,exam_id__2013-11": 0.5, + "acc,exam_id__2018-25": 0.3875, + "acc,exam_id__2016-21": 0.325, + "acc,exam_id__2012-06a": 0.4125, + "acc,exam_id__2015-18": 0.3875, + "acc,exam_id__2014-14": 0.3625, + "acc,exam_id__2016-20": 0.4, + "acc,exam_id__2012-09": 0.3116883116883117, + "acc,exam_id__2017-23": 0.3625, + "acc,exam_id__2012-08": 0.3375, + "acc,exam_id__2017-22": 0.4, + "acc,exam_id__2013-12": 0.375, + "acc,exam_id__2010-01": 0.32941176470588235, + "acc,exam_id__2012-07": 0.4125, + "acc,exam_id__2017-24": 0.35, + "acc,exam_id__2010-02": 0.34, + "acc,exam_id__2014-13": 0.3375, + "acc,exam_id__2013-10": 0.325, + "acc,exam_id__2012-06": 0.375, + "acc,exam_id__2011-03": 0.32323232323232326, + "acc,exam_id__2015-17": 0.358974358974359, + "acc,exam_id__2014-15": 0.4358974358974359, + "acc,exam_id__2011-04": 0.325, + "acc,exam_id__2011-05": 0.375, + "acc,exam_id__2015-16": 0.3125, + "acc,exam_id__2016-19": 0.358974358974359, + "acc,exam_id__2016-20a": 0.275, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.4557710763195685, + "acc,all": 0.4582843713278496 + }, + "tweetsentbr": { + "f1_macro,all": 0.18339913997994203, + "acc,all": 0.31343283582089554, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 120, - "non_truncated": 14030, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 135, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "e3032ba89a6df12b801ab3be2a29b59068aa048d", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 13476839424, - "model_num_parameters": 6738415616, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 1, - "max_length": 2048, - "max_ctx_length": 2016, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1449.9889705882354, - "min_seq_length": 1427, - "max_seq_length": 1516, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1658.9889705882354, - "min_seq_length": 1636, - "max_seq_length": 1725, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 50, - "non_truncated": 669, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 63, - "mean_seq_length": 1706.7426981919332, - "min_seq_length": 1340, - "max_seq_length": 2466, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9123783031988872 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 18, - "non_truncated": 1411, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 20, 
- "mean_seq_length": 1585.9881035689293, - "min_seq_length": 1333, - "max_seq_length": 2625, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.986004198740378 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1657.1184615384616, - "min_seq_length": 1605, - "max_seq_length": 1764, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 120, + "non_truncated": 14030, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 135, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "e3032ba89a6df12b801ab3be2a29b59068aa048d", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 13476839424, + "model_num_parameters": 6738415616, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 1, + "max_length": 2048, + "max_ctx_length": 2016, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1504.9178571428572, - "min_seq_length": 1481, - "max_seq_length": 1751, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1358.4145785876992, - "min_seq_length": 1103, - "max_seq_length": 1840, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1449.9889705882354, + "min_seq_length": 1427, + "max_seq_length": 1516, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1658.9889705882354, + "min_seq_length": 1636, + "max_seq_length": 1725, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 50, + "non_truncated": 669, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 63, + "mean_seq_length": 1706.7426981919332, + "min_seq_length": 1340, + "max_seq_length": 2466, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9123783031988872 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 18, + "non_truncated": 1411, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 20, + "mean_seq_length": 1585.9881035689293, + "min_seq_length": 1333, + 
"max_seq_length": 2625, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.986004198740378 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1657.1184615384616, + "min_seq_length": 1605, + "max_seq_length": 1764, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1504.9178571428572, + "min_seq_length": 1481, + "max_seq_length": 1751, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1358.4145785876992, + "min_seq_length": 1103, + "max_seq_length": 1840, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 52, + "non_truncated": 799, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 52, + "mean_seq_length": 1996.801410105758, + "min_seq_length": 1962, + "max_seq_length": 2040, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 24.938895417156285 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1771.6845771144278, + "min_seq_length": 1750, + "max_seq_length": 1889, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 52, - "non_truncated": 799, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 52, - "mean_seq_length": 1996.801410105758, - "min_seq_length": 1962, - "max_seq_length": 2040, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 24.938895417156285 + "config": { + "model": "huggingface", + "model_args": "pretrained=OliveiraJLT/Sagui-7B-Instruct-v0.1,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1771.6845771144278, - "min_seq_length": 1750, - "max_seq_length": 1889, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=OliveiraJLT/Sagui-7B-Instruct-v0.1,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, 
- null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/OliveiraJLT/Sagui-7B-Instruct-v0.1/raw_2024-07-18T10-05-57.992253/results.json b/OliveiraJLT/Sagui-7B-Instruct-v0.1/raw_2024-07-18T10-05-57.992253/results.json index 1cb44773700d1fc38feb0025c564225802d87eb0..0f096356409945ad59aa082e23213f1f36196ff3 100644 --- a/OliveiraJLT/Sagui-7B-Instruct-v0.1/raw_2024-07-18T10-05-57.992253/results.json +++ b/OliveiraJLT/Sagui-7B-Instruct-v0.1/raw_2024-07-18T10-05-57.992253/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.7116215451710273, - "acc,all": 0.7312091503267973, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.03155319076723917, - "mse,all": 2.7832107843137264, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.43671766342141866, - "acc,exam_id__USP_2024": 0.4146341463414634, - "acc,exam_id__USP_2023": 0.5, - "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, - "acc,exam_id__USP_2022": 0.32653061224489793, - "acc,exam_id__USP_2019": 0.425, - "acc,exam_id__UNICAMP_2018": 0.3333333333333333, - "acc,exam_id__UNICAMP_2019": 0.44, - "acc,exam_id__USP_2018": 0.3148148148148148, - "acc,exam_id__UNICAMP_2020": 0.5818181818181818, - "acc,exam_id__USP_2020": 0.42857142857142855, - "acc,exam_id__UNICAMP_2023": 0.5116279069767442, - "acc,exam_id__UNICAMP_2021_2": 0.49019607843137253, - "acc,exam_id__UNICAMP_2022": 0.48717948717948717, - "acc,exam_id__UNICAMP_2024": 0.4888888888888889, - "acc,exam_id__USP_2021": 0.36538461538461536, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.5136459062281316, - "acc,exam_id__2012": 0.49137931034482757, - "acc,exam_id__2022": 0.5037593984962406, - "acc,exam_id__2009": 0.4260869565217391, - "acc,exam_id__2010": 0.5897435897435898, - "acc,exam_id__2013": 0.5462962962962963, - "acc,exam_id__2015": 0.5294117647058824, - "acc,exam_id__2016": 0.5041322314049587, - "acc,exam_id__2014": 0.5229357798165137, - "acc,exam_id__2017": 0.47413793103448276, - "acc,exam_id__2023": 0.45925925925925926, - "acc,exam_id__2011": 0.5726495726495726, - "acc,exam_id__2016_2": 0.5528455284552846 - }, - "faquad_nli": { - "f1_macro,all": 0.5805353659391029, - "acc,all": 0.7307692307692307, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.4646385620718362, - "acc,all": 0.5642857142857143 - }, - "oab_exams": { - "acc,all": 0.3621867881548975, - "acc,exam_id__2015-18": 0.3875, - "acc,exam_id__2016-20a": 0.275, - "acc,exam_id__2012-07": 0.4125, - "acc,exam_id__2016-21": 0.325, - "acc,exam_id__2012-09": 0.3116883116883117, - "acc,exam_id__2016-19": 0.358974358974359, - "acc,exam_id__2013-10": 0.325, - "acc,exam_id__2011-03": 0.32323232323232326, - "acc,exam_id__2014-13": 0.3375, - "acc,exam_id__2010-01": 0.32941176470588235, - "acc,exam_id__2015-16": 0.3125, - "acc,exam_id__2018-25": 0.3875, - "acc,exam_id__2011-05": 0.375, - "acc,exam_id__2015-17": 0.358974358974359, - "acc,exam_id__2017-22": 0.4, - "acc,exam_id__2017-23": 0.3625, - "acc,exam_id__2014-15": 0.4358974358974359, - "acc,exam_id__2012-06a": 0.4125, - "acc,exam_id__2012-06": 0.375, - "acc,exam_id__2011-04": 0.325, - "acc,exam_id__2016-20": 0.4, - "acc,exam_id__2012-08": 0.3375, - "acc,exam_id__2013-11": 0.5, - "acc,exam_id__2014-14": 0.3625, - "acc,exam_id__2017-24": 0.35, - "acc,exam_id__2013-12": 0.375, - "acc,exam_id__2010-02": 0.34, - 
"alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.30384738421304563, - "acc,all": 0.4582843713278496 - }, - "tweetsentbr": { - "f1_macro,all": 0.18339913997994203, - "acc,all": 0.31343283582089554, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.7116215451710273, + "acc,all": 0.7312091503267973, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.03155319076723917, + "mse,all": 2.7832107843137264, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.43671766342141866, + "acc,exam_id__USP_2024": 0.4146341463414634, + "acc,exam_id__USP_2023": 0.5, + "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, + "acc,exam_id__USP_2022": 0.32653061224489793, + "acc,exam_id__USP_2019": 0.425, + "acc,exam_id__UNICAMP_2018": 0.3333333333333333, + "acc,exam_id__UNICAMP_2019": 0.44, + "acc,exam_id__USP_2018": 0.3148148148148148, + "acc,exam_id__UNICAMP_2020": 0.5818181818181818, + "acc,exam_id__USP_2020": 0.42857142857142855, + "acc,exam_id__UNICAMP_2023": 0.5116279069767442, + "acc,exam_id__UNICAMP_2021_2": 0.49019607843137253, + "acc,exam_id__UNICAMP_2022": 0.48717948717948717, + "acc,exam_id__UNICAMP_2024": 0.4888888888888889, + "acc,exam_id__USP_2021": 0.36538461538461536, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.5136459062281316, + "acc,exam_id__2012": 0.49137931034482757, + "acc,exam_id__2022": 0.5037593984962406, + "acc,exam_id__2009": 0.4260869565217391, + "acc,exam_id__2010": 0.5897435897435898, + "acc,exam_id__2013": 0.5462962962962963, + "acc,exam_id__2015": 0.5294117647058824, + "acc,exam_id__2016": 0.5041322314049587, + "acc,exam_id__2014": 0.5229357798165137, + "acc,exam_id__2017": 0.47413793103448276, + "acc,exam_id__2023": 0.45925925925925926, + "acc,exam_id__2011": 0.5726495726495726, + "acc,exam_id__2016_2": 0.5528455284552846 + }, + "faquad_nli": { + "f1_macro,all": 0.5805353659391029, + "acc,all": 0.7307692307692307, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.4646385620718362, + "acc,all": 0.5642857142857143 + }, + "oab_exams": { + "acc,all": 0.3621867881548975, + 
"acc,exam_id__2015-18": 0.3875, + "acc,exam_id__2016-20a": 0.275, + "acc,exam_id__2012-07": 0.4125, + "acc,exam_id__2016-21": 0.325, + "acc,exam_id__2012-09": 0.3116883116883117, + "acc,exam_id__2016-19": 0.358974358974359, + "acc,exam_id__2013-10": 0.325, + "acc,exam_id__2011-03": 0.32323232323232326, + "acc,exam_id__2014-13": 0.3375, + "acc,exam_id__2010-01": 0.32941176470588235, + "acc,exam_id__2015-16": 0.3125, + "acc,exam_id__2018-25": 0.3875, + "acc,exam_id__2011-05": 0.375, + "acc,exam_id__2015-17": 0.358974358974359, + "acc,exam_id__2017-22": 0.4, + "acc,exam_id__2017-23": 0.3625, + "acc,exam_id__2014-15": 0.4358974358974359, + "acc,exam_id__2012-06a": 0.4125, + "acc,exam_id__2012-06": 0.375, + "acc,exam_id__2011-04": 0.325, + "acc,exam_id__2016-20": 0.4, + "acc,exam_id__2012-08": 0.3375, + "acc,exam_id__2013-11": 0.5, + "acc,exam_id__2014-14": 0.3625, + "acc,exam_id__2017-24": 0.35, + "acc,exam_id__2013-12": 0.375, + "acc,exam_id__2010-02": 0.34, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.4557710763195685, + "acc,all": 0.4582843713278496 + }, + "tweetsentbr": { + "f1_macro,all": 0.18339913997994203, + "acc,all": 0.31343283582089554, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 120, - "non_truncated": 14030, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 135, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "e3032ba89a6df12b801ab3be2a29b59068aa048d", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 13476839424, - "model_num_parameters": 6738415616, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 1, - "max_length": 2048, - "max_ctx_length": 2016, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1449.9889705882354, - "min_seq_length": 1427, - "max_seq_length": 1516, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1658.9889705882354, - "min_seq_length": 1636, - "max_seq_length": 1725, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 50, - "non_truncated": 669, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 63, - "mean_seq_length": 1706.7426981919332, - "min_seq_length": 1340, - "max_seq_length": 2466, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9123783031988872 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 18, - "non_truncated": 1411, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 20, 
- "mean_seq_length": 1585.9881035689293, - "min_seq_length": 1333, - "max_seq_length": 2625, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.986004198740378 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1657.1184615384616, - "min_seq_length": 1605, - "max_seq_length": 1764, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 120, + "non_truncated": 14030, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 135, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "e3032ba89a6df12b801ab3be2a29b59068aa048d", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 13476839424, + "model_num_parameters": 6738415616, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 1, + "max_length": 2048, + "max_ctx_length": 2016, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1504.9178571428572, - "min_seq_length": 1481, - "max_seq_length": 1751, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1358.4145785876992, - "min_seq_length": 1103, - "max_seq_length": 1840, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1449.9889705882354, + "min_seq_length": 1427, + "max_seq_length": 1516, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1658.9889705882354, + "min_seq_length": 1636, + "max_seq_length": 1725, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 50, + "non_truncated": 669, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 63, + "mean_seq_length": 1706.7426981919332, + "min_seq_length": 1340, + "max_seq_length": 2466, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9123783031988872 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 18, + "non_truncated": 1411, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 20, + "mean_seq_length": 1585.9881035689293, + "min_seq_length": 1333, + 
"max_seq_length": 2625, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.986004198740378 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1657.1184615384616, + "min_seq_length": 1605, + "max_seq_length": 1764, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1504.9178571428572, + "min_seq_length": 1481, + "max_seq_length": 1751, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1358.4145785876992, + "min_seq_length": 1103, + "max_seq_length": 1840, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 52, + "non_truncated": 799, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 52, + "mean_seq_length": 1996.801410105758, + "min_seq_length": 1962, + "max_seq_length": 2040, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 24.938895417156285 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1771.6845771144278, + "min_seq_length": 1750, + "max_seq_length": 1889, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 52, - "non_truncated": 799, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 52, - "mean_seq_length": 1996.801410105758, - "min_seq_length": 1962, - "max_seq_length": 2040, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 24.938895417156285 + "config": { + "model": "huggingface", + "model_args": "pretrained=OliveiraJLT/Sagui-7B-Instruct-v0.1,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1771.6845771144278, - "min_seq_length": 1750, - "max_seq_length": 1889, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=OliveiraJLT/Sagui-7B-Instruct-v0.1,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, 
- null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/OliveiraJLT/Sagui-7B-Instruct-v0.1/raw_2024-07-18T10-06-00.459222/results.json b/OliveiraJLT/Sagui-7B-Instruct-v0.1/raw_2024-07-18T10-06-00.459222/results.json index d480b7a5746a7723c3401966805494d9e9f6b593..fbf3f4ea9a72538c3a38ea2da9cff65d4cfd74c7 100644 --- a/OliveiraJLT/Sagui-7B-Instruct-v0.1/raw_2024-07-18T10-06-00.459222/results.json +++ b/OliveiraJLT/Sagui-7B-Instruct-v0.1/raw_2024-07-18T10-06-00.459222/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.7116215451710273, - "acc,all": 0.7312091503267973, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.03155319076723917, - "mse,all": 2.7832107843137264, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.43671766342141866, - "acc,exam_id__USP_2019": 0.425, - "acc,exam_id__UNICAMP_2018": 0.3333333333333333, - "acc,exam_id__UNICAMP_2020": 0.5818181818181818, - "acc,exam_id__UNICAMP_2021_2": 0.49019607843137253, - "acc,exam_id__USP_2020": 0.42857142857142855, - "acc,exam_id__USP_2023": 0.5, - "acc,exam_id__USP_2018": 0.3148148148148148, - "acc,exam_id__UNICAMP_2023": 0.5116279069767442, - "acc,exam_id__UNICAMP_2024": 0.4888888888888889, - "acc,exam_id__UNICAMP_2019": 0.44, - "acc,exam_id__UNICAMP_2022": 0.48717948717948717, - "acc,exam_id__USP_2022": 0.32653061224489793, - "acc,exam_id__USP_2024": 0.4146341463414634, - "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, - "acc,exam_id__USP_2021": 0.36538461538461536, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.5136459062281316, - "acc,exam_id__2016": 0.5041322314049587, - "acc,exam_id__2017": 0.47413793103448276, - "acc,exam_id__2013": 0.5462962962962963, - "acc,exam_id__2016_2": 0.5528455284552846, - "acc,exam_id__2012": 0.49137931034482757, - "acc,exam_id__2011": 0.5726495726495726, - "acc,exam_id__2014": 0.5229357798165137, - "acc,exam_id__2022": 0.5037593984962406, - "acc,exam_id__2023": 0.45925925925925926, - "acc,exam_id__2015": 0.5294117647058824, - "acc,exam_id__2010": 0.5897435897435898, - "acc,exam_id__2009": 0.4260869565217391 - }, - "faquad_nli": { - "f1_macro,all": 0.5805353659391029, - "acc,all": 0.7307692307692307, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.4646385620718362, - "acc,all": 0.5642857142857143 - }, - "oab_exams": { - "acc,all": 0.3621867881548975, - "acc,exam_id__2013-12": 0.375, - "acc,exam_id__2015-16": 0.3125, - "acc,exam_id__2011-03": 0.32323232323232326, - "acc,exam_id__2017-24": 0.35, - "acc,exam_id__2012-06": 0.375, - "acc,exam_id__2016-20": 0.4, - "acc,exam_id__2018-25": 0.3875, - "acc,exam_id__2014-15": 0.4358974358974359, - "acc,exam_id__2016-19": 0.358974358974359, - "acc,exam_id__2011-05": 0.375, - "acc,exam_id__2016-20a": 0.275, - "acc,exam_id__2014-13": 0.3375, - "acc,exam_id__2017-22": 0.4, - "acc,exam_id__2012-06a": 0.4125, - "acc,exam_id__2016-21": 0.325, - "acc,exam_id__2013-10": 0.325, - "acc,exam_id__2011-04": 0.325, - "acc,exam_id__2012-09": 0.3116883116883117, - "acc,exam_id__2015-17": 0.358974358974359, - "acc,exam_id__2017-23": 0.3625, - "acc,exam_id__2012-08": 0.3375, - "acc,exam_id__2015-18": 0.3875, - "acc,exam_id__2010-02": 0.34, - "acc,exam_id__2012-07": 0.4125, - "acc,exam_id__2010-01": 0.32941176470588235, - "acc,exam_id__2013-11": 0.5, - "acc,exam_id__2014-14": 0.3625, - 
"alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.30384738421304563, - "acc,all": 0.4582843713278496 - }, - "tweetsentbr": { - "f1_macro,all": 0.18339913997994203, - "acc,all": 0.31343283582089554, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.7116215451710273, + "acc,all": 0.7312091503267973, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.03155319076723917, + "mse,all": 2.7832107843137264, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.43671766342141866, + "acc,exam_id__USP_2019": 0.425, + "acc,exam_id__UNICAMP_2018": 0.3333333333333333, + "acc,exam_id__UNICAMP_2020": 0.5818181818181818, + "acc,exam_id__UNICAMP_2021_2": 0.49019607843137253, + "acc,exam_id__USP_2020": 0.42857142857142855, + "acc,exam_id__USP_2023": 0.5, + "acc,exam_id__USP_2018": 0.3148148148148148, + "acc,exam_id__UNICAMP_2023": 0.5116279069767442, + "acc,exam_id__UNICAMP_2024": 0.4888888888888889, + "acc,exam_id__UNICAMP_2019": 0.44, + "acc,exam_id__UNICAMP_2022": 0.48717948717948717, + "acc,exam_id__USP_2022": 0.32653061224489793, + "acc,exam_id__USP_2024": 0.4146341463414634, + "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, + "acc,exam_id__USP_2021": 0.36538461538461536, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.5136459062281316, + "acc,exam_id__2016": 0.5041322314049587, + "acc,exam_id__2017": 0.47413793103448276, + "acc,exam_id__2013": 0.5462962962962963, + "acc,exam_id__2016_2": 0.5528455284552846, + "acc,exam_id__2012": 0.49137931034482757, + "acc,exam_id__2011": 0.5726495726495726, + "acc,exam_id__2014": 0.5229357798165137, + "acc,exam_id__2022": 0.5037593984962406, + "acc,exam_id__2023": 0.45925925925925926, + "acc,exam_id__2015": 0.5294117647058824, + "acc,exam_id__2010": 0.5897435897435898, + "acc,exam_id__2009": 0.4260869565217391 + }, + "faquad_nli": { + "f1_macro,all": 0.5805353659391029, + "acc,all": 0.7307692307692307, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.4646385620718362, + "acc,all": 0.5642857142857143 + }, + "oab_exams": { + "acc,all": 0.3621867881548975, + 
"acc,exam_id__2013-12": 0.375, + "acc,exam_id__2015-16": 0.3125, + "acc,exam_id__2011-03": 0.32323232323232326, + "acc,exam_id__2017-24": 0.35, + "acc,exam_id__2012-06": 0.375, + "acc,exam_id__2016-20": 0.4, + "acc,exam_id__2018-25": 0.3875, + "acc,exam_id__2014-15": 0.4358974358974359, + "acc,exam_id__2016-19": 0.358974358974359, + "acc,exam_id__2011-05": 0.375, + "acc,exam_id__2016-20a": 0.275, + "acc,exam_id__2014-13": 0.3375, + "acc,exam_id__2017-22": 0.4, + "acc,exam_id__2012-06a": 0.4125, + "acc,exam_id__2016-21": 0.325, + "acc,exam_id__2013-10": 0.325, + "acc,exam_id__2011-04": 0.325, + "acc,exam_id__2012-09": 0.3116883116883117, + "acc,exam_id__2015-17": 0.358974358974359, + "acc,exam_id__2017-23": 0.3625, + "acc,exam_id__2012-08": 0.3375, + "acc,exam_id__2015-18": 0.3875, + "acc,exam_id__2010-02": 0.34, + "acc,exam_id__2012-07": 0.4125, + "acc,exam_id__2010-01": 0.32941176470588235, + "acc,exam_id__2013-11": 0.5, + "acc,exam_id__2014-14": 0.3625, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.4557710763195685, + "acc,all": 0.4582843713278496 + }, + "tweetsentbr": { + "f1_macro,all": 0.18339913997994203, + "acc,all": 0.31343283582089554, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 120, - "non_truncated": 14030, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 135, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "e3032ba89a6df12b801ab3be2a29b59068aa048d", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 13476839424, - "model_num_parameters": 6738415616, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 1, - "max_length": 2048, - "max_ctx_length": 2016, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1449.9889705882354, - "min_seq_length": 1427, - "max_seq_length": 1516, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1658.9889705882354, - "min_seq_length": 1636, - "max_seq_length": 1725, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 50, - "non_truncated": 669, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 63, - "mean_seq_length": 1706.7426981919332, - "min_seq_length": 1340, - "max_seq_length": 2466, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9123783031988872 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 18, - "non_truncated": 1411, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 20, 
- "mean_seq_length": 1585.9881035689293, - "min_seq_length": 1333, - "max_seq_length": 2625, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.986004198740378 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1657.1184615384616, - "min_seq_length": 1605, - "max_seq_length": 1764, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 120, + "non_truncated": 14030, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 135, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "e3032ba89a6df12b801ab3be2a29b59068aa048d", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 13476839424, + "model_num_parameters": 6738415616, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 1, + "max_length": 2048, + "max_ctx_length": 2016, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1504.9178571428572, - "min_seq_length": 1481, - "max_seq_length": 1751, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1358.4145785876992, - "min_seq_length": 1103, - "max_seq_length": 1840, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1449.9889705882354, + "min_seq_length": 1427, + "max_seq_length": 1516, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1658.9889705882354, + "min_seq_length": 1636, + "max_seq_length": 1725, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 50, + "non_truncated": 669, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 63, + "mean_seq_length": 1706.7426981919332, + "min_seq_length": 1340, + "max_seq_length": 2466, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9123783031988872 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 18, + "non_truncated": 1411, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 20, + "mean_seq_length": 1585.9881035689293, + "min_seq_length": 1333, + 
"max_seq_length": 2625, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.986004198740378 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1657.1184615384616, + "min_seq_length": 1605, + "max_seq_length": 1764, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1504.9178571428572, + "min_seq_length": 1481, + "max_seq_length": 1751, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1358.4145785876992, + "min_seq_length": 1103, + "max_seq_length": 1840, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 52, + "non_truncated": 799, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 52, + "mean_seq_length": 1996.801410105758, + "min_seq_length": 1962, + "max_seq_length": 2040, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 24.938895417156285 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1771.6845771144278, + "min_seq_length": 1750, + "max_seq_length": 1889, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 52, - "non_truncated": 799, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 52, - "mean_seq_length": 1996.801410105758, - "min_seq_length": 1962, - "max_seq_length": 2040, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 24.938895417156285 + "config": { + "model": "huggingface", + "model_args": "pretrained=OliveiraJLT/Sagui-7B-Instruct-v0.1,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1771.6845771144278, - "min_seq_length": 1750, - "max_seq_length": 1889, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=OliveiraJLT/Sagui-7B-Instruct-v0.1,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, 
- null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/OliveiraJLT/Sagui-7B-Instruct-v0.1/results_2024-07-18T10-05-57.915014.json b/OliveiraJLT/Sagui-7B-Instruct-v0.1/results_2024-07-18T10-05-57.915014.json index c60492c84cddc6b6f99ba1dd2f7fa7c8bd71bdca..803b9270a326dd21331edda6d5af3216042a5d6c 100644 --- a/OliveiraJLT/Sagui-7B-Instruct-v0.1/results_2024-07-18T10-05-57.915014.json +++ b/OliveiraJLT/Sagui-7B-Instruct-v0.1/results_2024-07-18T10-05-57.915014.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.3986828384385156, - "all_grouped_npm": 0.0973841405471487, + "all_grouped_average": 0.41556324867257366, + "all_grouped_npm": 0.1297841601902544, "all_grouped": { "enem_challenge": 0.5136459062281316, "bluex": 0.43671766342141866, @@ -44,7 +44,7 @@ "assin2_sts": 0.03155319076723917, "faquad_nli": 0.5805353659391029, "hatebr_offensive": 0.4646385620718362, - "portuguese_hate_speech": 0.30384738421304563, + "portuguese_hate_speech": 0.4557710763195685, "tweetsentbr": 0.18339913997994203 }, "all": { @@ -55,7 +55,7 @@ "harness|assin2_sts|assin2_sts|None|15": 0.03155319076723917, "harness|faquad_nli|faquad_nli|None|15": 0.5805353659391029, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.4646385620718362, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.30384738421304563, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.4557710763195685, "harness|tweetsentbr|tweetsentbr|None|25": 0.18339913997994203 }, "harness|enem_challenge|enem_challenge|None|3": { @@ -145,9 +145,9 @@ "main_score": 0.4646385620718362 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.30384738421304563, + "f1_macro,all": 0.4557710763195685, "acc,all": 0.4582843713278496, - "main_score": 0.30384738421304563 + "main_score": 0.4557710763195685 }, "harness|tweetsentbr|tweetsentbr|None|25": { "f1_macro,all": 0.18339913997994203, diff --git a/OliveiraJLT/Sagui-7B-Instruct-v0.1/results_2024-07-18T10-05-57.992253.json b/OliveiraJLT/Sagui-7B-Instruct-v0.1/results_2024-07-18T10-05-57.992253.json index 5df03ec7d41efff1780aec4bb7b6bbe974bbd18e..07731313f829f8838085c3481dfc2cee528c5530 100644 --- a/OliveiraJLT/Sagui-7B-Instruct-v0.1/results_2024-07-18T10-05-57.992253.json +++ b/OliveiraJLT/Sagui-7B-Instruct-v0.1/results_2024-07-18T10-05-57.992253.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.3986828384385156, - "all_grouped_npm": 0.0973841405471487, + "all_grouped_average": 0.41556324867257366, + "all_grouped_npm": 0.1297841601902544, "all_grouped": { "enem_challenge": 0.5136459062281316, "bluex": 0.43671766342141866, @@ -44,7 +44,7 @@ "assin2_sts": 0.03155319076723917, "faquad_nli": 0.5805353659391029, "hatebr_offensive": 0.4646385620718362, - "portuguese_hate_speech": 0.30384738421304563, + "portuguese_hate_speech": 0.4557710763195685, "tweetsentbr": 0.18339913997994203 }, "all": { @@ -55,7 +55,7 @@ "harness|assin2_sts|assin2_sts|None|15": 0.03155319076723917, "harness|faquad_nli|faquad_nli|None|15": 0.5805353659391029, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.4646385620718362, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.30384738421304563, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.4557710763195685, "harness|tweetsentbr|tweetsentbr|None|25": 0.18339913997994203 }, 
"harness|enem_challenge|enem_challenge|None|3": { @@ -145,9 +145,9 @@ "main_score": 0.4646385620718362 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.30384738421304563, + "f1_macro,all": 0.4557710763195685, "acc,all": 0.4582843713278496, - "main_score": 0.30384738421304563 + "main_score": 0.4557710763195685 }, "harness|tweetsentbr|tweetsentbr|None|25": { "f1_macro,all": 0.18339913997994203, diff --git a/OliveiraJLT/Sagui-7B-Instruct-v0.1/results_2024-07-18T10-06-00.459222.json b/OliveiraJLT/Sagui-7B-Instruct-v0.1/results_2024-07-18T10-06-00.459222.json index c8aea5d4d3cc7e80ca00120dcd339a6a83173aa6..1b4d4c6f40e0cc0095610a7fe5fab9563905fc44 100644 --- a/OliveiraJLT/Sagui-7B-Instruct-v0.1/results_2024-07-18T10-06-00.459222.json +++ b/OliveiraJLT/Sagui-7B-Instruct-v0.1/results_2024-07-18T10-06-00.459222.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.3986828384385156, - "all_grouped_npm": 0.0973841405471487, + "all_grouped_average": 0.41556324867257366, + "all_grouped_npm": 0.1297841601902544, "all_grouped": { "enem_challenge": 0.5136459062281316, "bluex": 0.43671766342141866, @@ -44,7 +44,7 @@ "assin2_sts": 0.03155319076723917, "faquad_nli": 0.5805353659391029, "hatebr_offensive": 0.4646385620718362, - "portuguese_hate_speech": 0.30384738421304563, + "portuguese_hate_speech": 0.4557710763195685, "tweetsentbr": 0.18339913997994203 }, "all": { @@ -55,7 +55,7 @@ "harness|assin2_sts|assin2_sts|None|15": 0.03155319076723917, "harness|faquad_nli|faquad_nli|None|15": 0.5805353659391029, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.4646385620718362, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.30384738421304563, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.4557710763195685, "harness|tweetsentbr|tweetsentbr|None|25": 0.18339913997994203 }, "harness|enem_challenge|enem_challenge|None|3": { @@ -145,9 +145,9 @@ "main_score": 0.4646385620718362 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.30384738421304563, + "f1_macro,all": 0.4557710763195685, "acc,all": 0.4582843713278496, - "main_score": 0.30384738421304563 + "main_score": 0.4557710763195685 }, "harness|tweetsentbr|tweetsentbr|None|25": { "f1_macro,all": 0.18339913997994203, diff --git a/Qwen/Qwen-1_8B-Chat/raw_2024-02-23T15-51-23.035475/results.json b/Qwen/Qwen-1_8B-Chat/raw_2024-02-23T15-51-23.035475/results.json index 7dc500d2629667668b8b6abfebe88dc382676a90..94ee3eea5e1fd3d02318008033d1782f7e9c5e7c 100644 --- a/Qwen/Qwen-1_8B-Chat/raw_2024-02-23T15-51-23.035475/results.json +++ b/Qwen/Qwen-1_8B-Chat/raw_2024-02-23T15-51-23.035475/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.6805974700732741, - "acc,all": 0.6850490196078431, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.2428472411987268, - "mse,all": 1.7007271241830062, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.011126564673157162, - "acc,exam_id__UNICAMP_2024": 0.0, - "acc,exam_id__USP_2021": 0.057692307692307696, - "acc,exam_id__UNICAMP_2020": 0.01818181818181818, - "acc,exam_id__USP_2018": 0.0, - "acc,exam_id__USP_2024": 0.024390243902439025, - "acc,exam_id__USP_2023": 0.0, - "acc,exam_id__UNICAMP_2021_2": 0.0, - "acc,exam_id__UNICAMP_2023": 0.0, - "acc,exam_id__UNICAMP_2019": 0.02, - "acc,exam_id__USP_2019": 0.0, - "acc,exam_id__UNICAMP_2021_1": 0.021739130434782608, - "acc,exam_id__UNICAMP_2018": 0.0, - "acc,exam_id__USP_2022": 
0.02040816326530612, - "acc,exam_id__UNICAMP_2022": 0.0, - "acc,exam_id__USP_2020": 0.0, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.18124562631210636, - "acc,exam_id__2017": 0.1810344827586207, - "acc,exam_id__2010": 0.11965811965811966, - "acc,exam_id__2015": 0.20168067226890757, - "acc,exam_id__2011": 0.1623931623931624, - "acc,exam_id__2016_2": 0.13821138211382114, - "acc,exam_id__2022": 0.2631578947368421, - "acc,exam_id__2023": 0.26666666666666666, - "acc,exam_id__2013": 0.1574074074074074, - "acc,exam_id__2009": 0.16521739130434782, - "acc,exam_id__2012": 0.12931034482758622, - "acc,exam_id__2016": 0.21487603305785125, - "acc,exam_id__2014": 0.14678899082568808 - }, - "faquad_nli": { - "f1_macro,all": 0.29076165653055314, - "acc,all": 0.7723076923076924, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.6560055396510498, - "acc,all": 0.6621428571428571 - }, - "oab_exams": { - "acc,all": 0.269248291571754, - "acc,exam_id__2016-20": 0.2375, - "acc,exam_id__2016-21": 0.2875, - "acc,exam_id__2015-16": 0.2125, - "acc,exam_id__2011-04": 0.2625, - "acc,exam_id__2011-03": 0.25252525252525254, - "acc,exam_id__2012-06": 0.25, - "acc,exam_id__2016-19": 0.2564102564102564, - "acc,exam_id__2016-20a": 0.2375, - "acc,exam_id__2011-05": 0.2625, - "acc,exam_id__2010-02": 0.26, - "acc,exam_id__2012-06a": 0.2625, - "acc,exam_id__2015-17": 0.23076923076923078, - "acc,exam_id__2012-07": 0.3125, - "acc,exam_id__2013-11": 0.325, - "acc,exam_id__2018-25": 0.2875, - "acc,exam_id__2014-14": 0.325, - "acc,exam_id__2017-23": 0.25, - "acc,exam_id__2010-01": 0.23529411764705882, - "acc,exam_id__2015-18": 0.2125, - "acc,exam_id__2012-08": 0.3375, - "acc,exam_id__2013-12": 0.275, - "acc,exam_id__2013-10": 0.3125, - "acc,exam_id__2017-24": 0.2875, - "acc,exam_id__2017-22": 0.2375, - "acc,exam_id__2012-09": 0.2987012987012987, - "acc,exam_id__2014-15": 0.32051282051282054, - "acc,exam_id__2014-13": 0.25, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.5738712001856398, - "acc,all": 0.6145710928319624 - }, - "tweetsentbr": { - "f1_macro,all": 0.48306623914561087, - "acc,all": 0.5537313432835821, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.6805974700732741, + "acc,all": 0.6850490196078431, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.2428472411987268, + "mse,all": 1.7007271241830062, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.011126564673157162, + "acc,exam_id__UNICAMP_2024": 0.0, + "acc,exam_id__USP_2021": 0.057692307692307696, + "acc,exam_id__UNICAMP_2020": 0.01818181818181818, + "acc,exam_id__USP_2018": 0.0, + "acc,exam_id__USP_2024": 0.024390243902439025, + "acc,exam_id__USP_2023": 0.0, + "acc,exam_id__UNICAMP_2021_2": 0.0, + "acc,exam_id__UNICAMP_2023": 0.0, + "acc,exam_id__UNICAMP_2019": 0.02, + "acc,exam_id__USP_2019": 0.0, + "acc,exam_id__UNICAMP_2021_1": 0.021739130434782608, + "acc,exam_id__UNICAMP_2018": 0.0, + "acc,exam_id__USP_2022": 0.02040816326530612, + "acc,exam_id__UNICAMP_2022": 0.0, + "acc,exam_id__USP_2020": 0.0, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.18124562631210636, + "acc,exam_id__2017": 0.1810344827586207, + "acc,exam_id__2010": 0.11965811965811966, + "acc,exam_id__2015": 0.20168067226890757, + "acc,exam_id__2011": 0.1623931623931624, + "acc,exam_id__2016_2": 0.13821138211382114, + "acc,exam_id__2022": 0.2631578947368421, + "acc,exam_id__2023": 0.26666666666666666, + "acc,exam_id__2013": 0.1574074074074074, + "acc,exam_id__2009": 0.16521739130434782, + "acc,exam_id__2012": 0.12931034482758622, + "acc,exam_id__2016": 0.21487603305785125, + "acc,exam_id__2014": 0.14678899082568808 + }, + "faquad_nli": { + "f1_macro,all": 0.43614248479582973, + "acc,all": 0.7723076923076924, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.6560055396510498, + "acc,all": 0.6621428571428571 + }, + "oab_exams": { + "acc,all": 0.269248291571754, + "acc,exam_id__2016-20": 0.2375, + "acc,exam_id__2016-21": 0.2875, + "acc,exam_id__2015-16": 0.2125, + "acc,exam_id__2011-04": 0.2625, + "acc,exam_id__2011-03": 0.25252525252525254, + "acc,exam_id__2012-06": 0.25, + "acc,exam_id__2016-19": 0.2564102564102564, + "acc,exam_id__2016-20a": 0.2375, + "acc,exam_id__2011-05": 0.2625, + "acc,exam_id__2010-02": 0.26, + "acc,exam_id__2012-06a": 0.2625, + "acc,exam_id__2015-17": 0.23076923076923078, + "acc,exam_id__2012-07": 0.3125, + "acc,exam_id__2013-11": 0.325, + "acc,exam_id__2018-25": 0.2875, + "acc,exam_id__2014-14": 0.325, + "acc,exam_id__2017-23": 0.25, + "acc,exam_id__2010-01": 0.23529411764705882, + "acc,exam_id__2015-18": 0.2125, + "acc,exam_id__2012-08": 0.3375, + "acc,exam_id__2013-12": 0.275, + "acc,exam_id__2013-10": 0.3125, + "acc,exam_id__2017-24": 0.2875, + "acc,exam_id__2017-22": 0.2375, + 
"acc,exam_id__2012-09": 0.2987012987012987, + "acc,exam_id__2014-15": 0.32051282051282054, + "acc,exam_id__2014-13": 0.25, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.5738712001856398, + "acc,all": 0.6145710928319624 + }, + "tweetsentbr": { + "f1_macro,all": 0.48306623914561087, + "acc,all": 0.5537313432835821, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "1d0f68de57b88cfde81f3c3e537f24464d889081", - 
"model_dtype": "torch.bfloat16", - "model_memory_footprint": 3675230336, - "model_num_parameters": 1836828672, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 4096, - "max_ctx_length": 4064, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1159.2818627450981, - "min_seq_length": 1140, - "max_seq_length": 1221, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1365.2818627450981, - "min_seq_length": 1346, - "max_seq_length": 1427, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1455.2378303198886, - "min_seq_length": 1135, - "max_seq_length": 2106, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1375.7809657102869, - "min_seq_length": 1145, - "max_seq_length": 2336, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1290.6876923076923, - "min_seq_length": 1246, - "max_seq_length": 1386, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1007.6992857142857, - "min_seq_length": 989, - "max_seq_length": 1228, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "1d0f68de57b88cfde81f3c3e537f24464d889081", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 3675230336, + "model_num_parameters": 1836828672, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 
4096, + "max_ctx_length": 4064, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1163.888382687927, - "min_seq_length": 934, - "max_seq_length": 1577, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1159.2818627450981, + "min_seq_length": 1140, + "max_seq_length": 1221, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1365.2818627450981, + "min_seq_length": 1346, + "max_seq_length": 1427, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1455.2378303198886, + "min_seq_length": 1135, + "max_seq_length": 2106, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1375.7809657102869, + "min_seq_length": 1145, + "max_seq_length": 2336, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1290.6876923076923, + "min_seq_length": 1246, + "max_seq_length": 1386, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1007.6992857142857, + "min_seq_length": 989, + "max_seq_length": 1228, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1163.888382687927, + "min_seq_length": 934, + "max_seq_length": 1577, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1450.62044653349, + "min_seq_length": 1420, + "max_seq_length": 1484, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + 
"mean_seq_length": 1301.0567164179104, + "min_seq_length": 1283, + "max_seq_length": 1349, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1450.62044653349, - "min_seq_length": 1420, - "max_seq_length": 1484, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Qwen/Qwen-1_8B-Chat,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1301.0567164179104, - "min_seq_length": 1283, - "max_seq_length": 1349, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Qwen/Qwen-1_8B-Chat,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "804df15" + "git_hash": "804df15" } \ No newline at end of file diff --git a/Qwen/Qwen-1_8B-Chat/raw_2024-04-20T18-02-22.288187/results.json b/Qwen/Qwen-1_8B-Chat/raw_2024-04-20T18-02-22.288187/results.json index d9d79490412b0d6f7073d2b47153aad9fe379793..a9d0f67bfb718b9e93ba501546aae6b16571f809 100644 --- a/Qwen/Qwen-1_8B-Chat/raw_2024-04-20T18-02-22.288187/results.json +++ b/Qwen/Qwen-1_8B-Chat/raw_2024-04-20T18-02-22.288187/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.6789403882059499, - "acc,all": 0.6834150326797386, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.2482452669328401, - "mse,all": 1.6897017973856208, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.011126564673157162, - "acc,exam_id__UNICAMP_2022": 0.0, - "acc,exam_id__USP_2018": 0.0, - "acc,exam_id__UNICAMP_2023": 0.0, - "acc,exam_id__USP_2021": 0.057692307692307696, - "acc,exam_id__UNICAMP_2019": 0.02, - "acc,exam_id__UNICAMP_2021_2": 0.0, - "acc,exam_id__USP_2022": 0.02040816326530612, - "acc,exam_id__USP_2020": 0.0, - "acc,exam_id__UNICAMP_2020": 0.01818181818181818, - "acc,exam_id__UNICAMP_2018": 0.0, - "acc,exam_id__USP_2023": 0.0, - "acc,exam_id__USP_2024": 0.024390243902439025, - "acc,exam_id__USP_2019": 0.0, - "acc,exam_id__UNICAMP_2021_1": 0.021739130434782608, - "acc,exam_id__UNICAMP_2024": 0.0, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.1861441567529741, - "acc,exam_id__2015": 0.21008403361344538, - "acc,exam_id__2013": 0.1574074074074074, - "acc,exam_id__2017": 0.1810344827586207, - "acc,exam_id__2023": 0.26666666666666666, - "acc,exam_id__2011": 0.18803418803418803, - "acc,exam_id__2016": 0.21487603305785125, - "acc,exam_id__2009": 
0.1826086956521739, - "acc,exam_id__2012": 0.13793103448275862, - "acc,exam_id__2014": 0.14678899082568808, - "acc,exam_id__2022": 0.2631578947368421, - "acc,exam_id__2016_2": 0.13821138211382114, - "acc,exam_id__2010": 0.11965811965811966 - }, - "faquad_nli": { - "f1_macro,all": 0.2910144927536232, - "acc,all": 0.7723076923076924, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.6567993003026897, - "acc,all": 0.6628571428571428 - }, - "oab_exams": { - "acc,all": 0.27015945330296126, - "acc,exam_id__2010-01": 0.23529411764705882, - "acc,exam_id__2015-17": 0.24358974358974358, - "acc,exam_id__2011-03": 0.25252525252525254, - "acc,exam_id__2014-14": 0.325, - "acc,exam_id__2014-13": 0.25, - "acc,exam_id__2014-15": 0.32051282051282054, - "acc,exam_id__2011-05": 0.2625, - "acc,exam_id__2016-20a": 0.2375, - "acc,exam_id__2016-21": 0.2875, - "acc,exam_id__2012-06": 0.25, - "acc,exam_id__2013-12": 0.2875, - "acc,exam_id__2017-22": 0.2375, - "acc,exam_id__2012-06a": 0.2625, - "acc,exam_id__2016-19": 0.2564102564102564, - "acc,exam_id__2010-02": 0.26, - "acc,exam_id__2012-09": 0.2987012987012987, - "acc,exam_id__2016-20": 0.2375, - "acc,exam_id__2015-18": 0.2125, - "acc,exam_id__2017-23": 0.25, - "acc,exam_id__2017-24": 0.2875, - "acc,exam_id__2011-04": 0.2625, - "acc,exam_id__2015-16": 0.2125, - "acc,exam_id__2013-10": 0.3125, - "acc,exam_id__2012-08": 0.3375, - "acc,exam_id__2012-07": 0.3125, - "acc,exam_id__2013-11": 0.325, - "acc,exam_id__2018-25": 0.2875, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.5796176975945018, - "acc,all": 0.6216216216216216 - }, - "tweetsentbr": { - "f1_macro,all": 0.3813437905987532, - "acc,all": 0.4845771144278607, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.6789403882059499, + "acc,all": 0.6834150326797386, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.2482452669328401, + "mse,all": 1.6897017973856208, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.011126564673157162, + "acc,exam_id__UNICAMP_2022": 0.0, + "acc,exam_id__USP_2018": 0.0, + "acc,exam_id__UNICAMP_2023": 0.0, + "acc,exam_id__USP_2021": 0.057692307692307696, + "acc,exam_id__UNICAMP_2019": 0.02, + "acc,exam_id__UNICAMP_2021_2": 0.0, + "acc,exam_id__USP_2022": 0.02040816326530612, + "acc,exam_id__USP_2020": 0.0, + "acc,exam_id__UNICAMP_2020": 0.01818181818181818, + "acc,exam_id__UNICAMP_2018": 0.0, + "acc,exam_id__USP_2023": 0.0, + "acc,exam_id__USP_2024": 0.024390243902439025, + "acc,exam_id__USP_2019": 0.0, + "acc,exam_id__UNICAMP_2021_1": 0.021739130434782608, + "acc,exam_id__UNICAMP_2024": 0.0, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.1861441567529741, + "acc,exam_id__2015": 0.21008403361344538, + "acc,exam_id__2013": 0.1574074074074074, + "acc,exam_id__2017": 0.1810344827586207, + "acc,exam_id__2023": 0.26666666666666666, + "acc,exam_id__2011": 0.18803418803418803, + "acc,exam_id__2016": 0.21487603305785125, + "acc,exam_id__2009": 0.1826086956521739, + "acc,exam_id__2012": 0.13793103448275862, + "acc,exam_id__2014": 0.14678899082568808, + "acc,exam_id__2022": 0.2631578947368421, + "acc,exam_id__2016_2": 0.13821138211382114, + "acc,exam_id__2010": 0.11965811965811966 + }, + "faquad_nli": { + "f1_macro,all": 0.43652173913043485, + "acc,all": 0.7723076923076924, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.6567993003026897, + "acc,all": 0.6628571428571428 + }, + "oab_exams": { + "acc,all": 0.27015945330296126, + "acc,exam_id__2010-01": 0.23529411764705882, + "acc,exam_id__2015-17": 0.24358974358974358, + "acc,exam_id__2011-03": 0.25252525252525254, + "acc,exam_id__2014-14": 0.325, + "acc,exam_id__2014-13": 0.25, + "acc,exam_id__2014-15": 0.32051282051282054, + "acc,exam_id__2011-05": 0.2625, + "acc,exam_id__2016-20a": 0.2375, + "acc,exam_id__2016-21": 0.2875, + "acc,exam_id__2012-06": 0.25, + "acc,exam_id__2013-12": 0.2875, + "acc,exam_id__2017-22": 0.2375, + "acc,exam_id__2012-06a": 0.2625, + "acc,exam_id__2016-19": 0.2564102564102564, + "acc,exam_id__2010-02": 0.26, + "acc,exam_id__2012-09": 0.2987012987012987, + "acc,exam_id__2016-20": 0.2375, + "acc,exam_id__2015-18": 0.2125, + "acc,exam_id__2017-23": 0.25, + "acc,exam_id__2017-24": 0.2875, + "acc,exam_id__2011-04": 0.2625, + "acc,exam_id__2015-16": 0.2125, + "acc,exam_id__2013-10": 0.3125, + 
"acc,exam_id__2012-08": 0.3375, + "acc,exam_id__2012-07": 0.3125, + "acc,exam_id__2013-11": 0.325, + "acc,exam_id__2018-25": 0.2875, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.5796176975945018, + "acc,all": 0.6216216216216216 + }, + "tweetsentbr": { + "f1_macro,all": 0.3813437905987532, + "acc,all": 0.4845771144278607, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "1d0f68de57b88cfde81f3c3e537f24464d889081", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 3675230336, - "model_num_parameters": 1836828672, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1159.2818627450981, - "min_seq_length": 1140, - "max_seq_length": 1221, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1365.2818627450981, - "min_seq_length": 1346, - "max_seq_length": 1427, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1455.2378303198886, - "min_seq_length": 1135, - "max_seq_length": 2106, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1375.7809657102869, 
- "min_seq_length": 1145, - "max_seq_length": 2336, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1290.6876923076923, - "min_seq_length": 1246, - "max_seq_length": 1386, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "1d0f68de57b88cfde81f3c3e537f24464d889081", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 3675230336, + "model_num_parameters": 1836828672, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1007.6992857142857, - "min_seq_length": 989, - "max_seq_length": 1228, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1163.888382687927, - "min_seq_length": 934, - "max_seq_length": 1577, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1159.2818627450981, + "min_seq_length": 1140, + "max_seq_length": 1221, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1365.2818627450981, + "min_seq_length": 1346, + "max_seq_length": 1427, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1455.2378303198886, + "min_seq_length": 1135, + "max_seq_length": 2106, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1375.7809657102869, + "min_seq_length": 1145, + "max_seq_length": 2336, + "max_ctx_length": 2528, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1290.6876923076923, + "min_seq_length": 1246, + "max_seq_length": 1386, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1007.6992857142857, + "min_seq_length": 989, + "max_seq_length": 1228, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1163.888382687927, + "min_seq_length": 934, + "max_seq_length": 1577, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1450.62044653349, + "min_seq_length": 1420, + "max_seq_length": 1484, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1411.0567164179104, + "min_seq_length": 1393, + "max_seq_length": 1459, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1450.62044653349, - "min_seq_length": 1420, - "max_seq_length": 1484, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Qwen/Qwen-1_8B-Chat,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1411.0567164179104, - "min_seq_length": 1393, - "max_seq_length": 1459, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Qwen/Qwen-1_8B-Chat,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No 
newline at end of file diff --git a/Qwen/Qwen-1_8B-Chat/results_2024-02-23T15-51-23.035475.json b/Qwen/Qwen-1_8B-Chat/results_2024-02-23T15-51-23.035475.json index 159674de7980b1721a4682da6d7954437fc3c818..9aa7b0281c6d9985e223a66075fc1a15785db85f 100644 --- a/Qwen/Qwen-1_8B-Chat/results_2024-02-23T15-51-23.035475.json +++ b/Qwen/Qwen-1_8B-Chat/results_2024-02-23T15-51-23.035475.json @@ -34,15 +34,15 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.3765299810379858, - "all_grouped_npm": 0.08348997450998025, + "all_grouped_average": 0.39268340640079424, + "all_grouped_npm": 0.11318377113279003, "all_grouped": { "enem_challenge": 0.18124562631210636, "bluex": 0.011126564673157162, "oab_exams": 0.269248291571754, "assin2_rte": 0.6805974700732741, "assin2_sts": 0.2428472411987268, - "faquad_nli": 0.29076165653055314, + "faquad_nli": 0.43614248479582973, "hatebr_offensive": 0.6560055396510498, "portuguese_hate_speech": 0.5738712001856398, "tweetsentbr": 0.48306623914561087 @@ -53,7 +53,7 @@ "harness|oab_exams|oab_exams|None|3": 0.269248291571754, "harness|assin2_rte|assin2_rte|None|15": 0.6805974700732741, "harness|assin2_sts|assin2_sts|None|15": 0.2428472411987268, - "harness|faquad_nli|faquad_nli|None|15": 0.29076165653055314, + "harness|faquad_nli|faquad_nli|None|15": 0.43614248479582973, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.6560055396510498, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.5738712001856398, "harness|tweetsentbr|tweetsentbr|None|25": 0.48306623914561087 @@ -135,9 +135,9 @@ "main_score": 0.2428472411987268 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.29076165653055314, + "f1_macro,all": 0.43614248479582973, "acc,all": 0.7723076923076924, - "main_score": 0.29076165653055314 + "main_score": 0.43614248479582973 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { "f1_macro,all": 0.6560055396510498, diff --git a/Qwen/Qwen-1_8B-Chat/results_2024-04-20T18-02-22.288187.json b/Qwen/Qwen-1_8B-Chat/results_2024-04-20T18-02-22.288187.json index e3d42ba2d705531d6c52fe5a49d07af36d5cc7c3..2ad4c8bd87837c787dc65dee5c7478071a62249a 100644 --- a/Qwen/Qwen-1_8B-Chat/results_2024-04-20T18-02-22.288187.json +++ b/Qwen/Qwen-1_8B-Chat/results_2024-04-20T18-02-22.288187.json @@ -34,15 +34,15 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.36704345679082784, - "all_grouped_npm": 0.06917122510414368, + "all_grouped_average": 0.38321092861047357, + "all_grouped_npm": 0.09889084241966893, "all_grouped": { "enem_challenge": 0.1861441567529741, "bluex": 0.011126564673157162, "oab_exams": 0.27015945330296126, "assin2_rte": 0.6789403882059499, "assin2_sts": 0.2482452669328401, - "faquad_nli": 0.2910144927536232, + "faquad_nli": 0.43652173913043485, "hatebr_offensive": 0.6567993003026897, "portuguese_hate_speech": 0.5796176975945018, "tweetsentbr": 0.3813437905987532 @@ -53,7 +53,7 @@ "harness|oab_exams|oab_exams|None|3": 0.27015945330296126, "harness|assin2_rte|assin2_rte|None|15": 0.6789403882059499, "harness|assin2_sts|assin2_sts|None|15": 0.2482452669328401, - "harness|faquad_nli|faquad_nli|None|15": 0.2910144927536232, + "harness|faquad_nli|faquad_nli|None|15": 0.43652173913043485, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.6567993003026897, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.5796176975945018, "harness|tweetsentbr|tweetsentbr|None|25": 0.3813437905987532 @@ -135,9 +135,9 @@ "main_score": 0.2482452669328401 }, "harness|faquad_nli|faquad_nli|None|15": { - 
"f1_macro,all": 0.2910144927536232, + "f1_macro,all": 0.43652173913043485, "acc,all": 0.7723076923076924, - "main_score": 0.2910144927536232 + "main_score": 0.43652173913043485 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { "f1_macro,all": 0.6567993003026897, diff --git a/Qwen/Qwen-72B-Chat/raw_2024-03-04T09-41-26.834248/results.json b/Qwen/Qwen-72B-Chat/raw_2024-03-04T09-41-26.834248/results.json index 50a2199630e6001aa5a77ffb97c8fcb558b5e33d..8e2398e8f6098ea32c2f1714b1b413657fe9e432 100644 --- a/Qwen/Qwen-72B-Chat/raw_2024-03-04T09-41-26.834248/results.json +++ b/Qwen/Qwen-72B-Chat/raw_2024-03-04T09-41-26.834248/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.45209339774557167, - "acc,all": 0.5992647058823529, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0, - "mse,all": 3.062949346405229, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.006954102920723227, - "acc,exam_id__USP_2020": 0.0, - "acc,exam_id__UNICAMP_2019": 0.0, - "acc,exam_id__UNICAMP_2020": 0.0, - "acc,exam_id__USP_2019": 0.0, - "acc,exam_id__USP_2022": 0.0, - "acc,exam_id__UNICAMP_2023": 0.0, - "acc,exam_id__UNICAMP_2022": 0.0, - "acc,exam_id__USP_2021": 0.038461538461538464, - "acc,exam_id__UNICAMP_2018": 0.018518518518518517, - "acc,exam_id__USP_2018": 0.018518518518518517, - "acc,exam_id__UNICAMP_2021_2": 0.0, - "acc,exam_id__USP_2023": 0.022727272727272728, - "acc,exam_id__UNICAMP_2024": 0.0, - "acc,exam_id__UNICAMP_2021_1": 0.0, - "acc,exam_id__USP_2024": 0.0, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.0006997900629811056, - "acc,exam_id__2016_2": 0.0, - "acc,exam_id__2022": 0.0, - "acc,exam_id__2016": 0.0, - "acc,exam_id__2010": 0.0, - "acc,exam_id__2009": 0.0, - "acc,exam_id__2015": 0.008403361344537815, - "acc,exam_id__2013": 0.0, - "acc,exam_id__2012": 0.0, - "acc,exam_id__2017": 0.0, - "acc,exam_id__2011": 0.0, - "acc,exam_id__2023": 0.0, - "acc,exam_id__2014": 0.0 - }, - "faquad_nli": { - "f1_macro,all": 0.06975747989982865, - "acc,all": 0.08153846153846153, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8440869615900215, - "acc,all": 0.8471428571428572 - }, - "oab_exams": { - "acc,all": 0.0, - "acc,exam_id__2014-14": 0.0, - "acc,exam_id__2015-18": 0.0, - "acc,exam_id__2010-01": 0.0, - "acc,exam_id__2012-06a": 0.0, - "acc,exam_id__2012-09": 0.0, - "acc,exam_id__2013-12": 0.0, - "acc,exam_id__2016-19": 0.0, - "acc,exam_id__2012-07": 0.0, - "acc,exam_id__2014-13": 0.0, - "acc,exam_id__2015-17": 0.0, - "acc,exam_id__2011-04": 0.0, - "acc,exam_id__2018-25": 0.0, - "acc,exam_id__2010-02": 0.0, - "acc,exam_id__2014-15": 0.0, - "acc,exam_id__2015-16": 0.0, - "acc,exam_id__2016-20": 0.0, - "acc,exam_id__2016-20a": 0.0, - "acc,exam_id__2013-10": 0.0, - "acc,exam_id__2011-03": 0.0, - "acc,exam_id__2017-24": 0.0, - "acc,exam_id__2017-22": 0.0, - "acc,exam_id__2011-05": 0.0, - "acc,exam_id__2012-08": 0.0, - "acc,exam_id__2012-06": 0.0, - "acc,exam_id__2013-11": 0.0, - "acc,exam_id__2017-23": 0.0, - "acc,exam_id__2016-21": 0.0, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6785350340853187, - "acc,all": 0.6968272620446534 - }, - "tweetsentbr": { - "f1_macro,all": 0.719922008738035, - "acc,all": 0.7472636815920398, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": 
"assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.6781400966183575, + "acc,all": 0.5992647058823529, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0, + "mse,all": 3.062949346405229, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.006954102920723227, + "acc,exam_id__USP_2020": 0.0, + "acc,exam_id__UNICAMP_2019": 0.0, + "acc,exam_id__UNICAMP_2020": 0.0, + "acc,exam_id__USP_2019": 0.0, + "acc,exam_id__USP_2022": 0.0, + "acc,exam_id__UNICAMP_2023": 0.0, + "acc,exam_id__UNICAMP_2022": 0.0, + "acc,exam_id__USP_2021": 0.038461538461538464, + "acc,exam_id__UNICAMP_2018": 0.018518518518518517, + "acc,exam_id__USP_2018": 0.018518518518518517, + "acc,exam_id__UNICAMP_2021_2": 0.0, + "acc,exam_id__USP_2023": 0.022727272727272728, + "acc,exam_id__UNICAMP_2024": 0.0, + "acc,exam_id__UNICAMP_2021_1": 0.0, + "acc,exam_id__USP_2024": 0.0, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.0006997900629811056, + "acc,exam_id__2016_2": 0.0, + "acc,exam_id__2022": 0.0, + "acc,exam_id__2016": 0.0, + "acc,exam_id__2010": 0.0, + "acc,exam_id__2009": 0.0, + "acc,exam_id__2015": 0.008403361344537815, + "acc,exam_id__2013": 0.0, + "acc,exam_id__2012": 0.0, + "acc,exam_id__2017": 0.0, + "acc,exam_id__2011": 0.0, + "acc,exam_id__2023": 0.0, + "acc,exam_id__2014": 0.0 + }, + "faquad_nli": { + "f1_macro,all": 0.10463621984974299, + "acc,all": 0.08153846153846153, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8440869615900215, + "acc,all": 0.8471428571428572 + }, + "oab_exams": { + "acc,all": 0.0, + "acc,exam_id__2014-14": 0.0, + "acc,exam_id__2015-18": 0.0, + "acc,exam_id__2010-01": 0.0, + "acc,exam_id__2012-06a": 0.0, + "acc,exam_id__2012-09": 0.0, + "acc,exam_id__2013-12": 0.0, + "acc,exam_id__2016-19": 0.0, + "acc,exam_id__2012-07": 0.0, + "acc,exam_id__2014-13": 0.0, + "acc,exam_id__2015-17": 0.0, + "acc,exam_id__2011-04": 0.0, + "acc,exam_id__2018-25": 0.0, + "acc,exam_id__2010-02": 0.0, + "acc,exam_id__2014-15": 0.0, + "acc,exam_id__2015-16": 0.0, + "acc,exam_id__2016-20": 0.0, + "acc,exam_id__2016-20a": 0.0, + "acc,exam_id__2013-10": 0.0, + "acc,exam_id__2011-03": 0.0, + "acc,exam_id__2017-24": 0.0, + "acc,exam_id__2017-22": 0.0, + "acc,exam_id__2011-05": 0.0, + "acc,exam_id__2012-08": 0.0, + "acc,exam_id__2012-06": 0.0, + 
"acc,exam_id__2013-11": 0.0, + "acc,exam_id__2017-23": 0.0, + "acc,exam_id__2016-21": 0.0, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6785350340853187, + "acc,all": 0.6968272620446534 + }, + "tweetsentbr": { + "f1_macro,all": 0.719922008738035, + "acc,all": 0.7472636815920398, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 5, - "non_truncated": 14145, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 5, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 2, - "accelerate_num_process": null, - "model_sha": "6eb5569e56644ea662b048e029de9d093e97d4b6", - 
"model_dtype": "torch.bfloat16", - "model_memory_footprint": 144596811424, - "model_num_parameters": 72287920128, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 1, - "max_length": 2048, - "max_ctx_length": 2016, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1159.2818627450981, - "min_seq_length": 1140, - "max_seq_length": 1221, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1365.2818627450981, - "min_seq_length": 1346, - "max_seq_length": 1427, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 3, - "non_truncated": 716, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 3, - "mean_seq_length": 1455.2378303198886, - "min_seq_length": 1135, - "max_seq_length": 2106, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.995827538247566 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - "mean_seq_length": 1375.7809657102869, - "min_seq_length": 1145, - "max_seq_length": 2336, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1290.6876923076923, - "min_seq_length": 1246, - "max_seq_length": 1386, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1007.6992857142857, - "min_seq_length": 989, - "max_seq_length": 1228, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 5, + "non_truncated": 14145, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 5, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 2, + "accelerate_num_process": null, + "model_sha": "6eb5569e56644ea662b048e029de9d093e97d4b6", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 144596811424, + "model_num_parameters": 72287920128, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + 
"batch_size": 1, + "max_length": 2048, + "max_ctx_length": 2016, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1163.888382687927, - "min_seq_length": 934, - "max_seq_length": 1577, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1159.2818627450981, + "min_seq_length": 1140, + "max_seq_length": 1221, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1365.2818627450981, + "min_seq_length": 1346, + "max_seq_length": 1427, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 3, + "non_truncated": 716, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 3, + "mean_seq_length": 1455.2378303198886, + "min_seq_length": 1135, + "max_seq_length": 2106, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.995827538247566 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1375.7809657102869, + "min_seq_length": 1145, + "max_seq_length": 2336, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1290.6876923076923, + "min_seq_length": 1246, + "max_seq_length": 1386, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1007.6992857142857, + "min_seq_length": 989, + "max_seq_length": 1228, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1163.888382687927, + "min_seq_length": 934, + "max_seq_length": 1577, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1450.62044653349, + "min_seq_length": 1420, + "max_seq_length": 1484, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + 
"padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1301.0567164179104, + "min_seq_length": 1283, + "max_seq_length": 1349, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1450.62044653349, - "min_seq_length": 1420, - "max_seq_length": 1484, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Qwen/Qwen-72B-Chat,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1301.0567164179104, - "min_seq_length": 1283, - "max_seq_length": 1349, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Qwen/Qwen-72B-Chat,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "804df15" + "git_hash": "804df15" } \ No newline at end of file diff --git a/Qwen/Qwen-72B-Chat/results_2024-03-04T09-41-26.834248.json b/Qwen/Qwen-72B-Chat/results_2024-03-04T09-41-26.834248.json index 101fa7ea5811ad20d042fe3edd6655334057e9f3..9c994cfb621ee80b2f35d431578e1394f44a59c4 100644 --- a/Qwen/Qwen-72B-Chat/results_2024-03-04T09-41-26.834248.json +++ b/Qwen/Qwen-72B-Chat/results_2024-03-04T09-41-26.834248.json @@ -34,15 +34,15 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.30800541944916443, - "all_grouped_npm": -0.0016944521105141603, + "all_grouped_average": 0.3369971348739089, + "all_grouped_npm": 0.05566207328235863, "all_grouped": { "enem_challenge": 0.0006997900629811056, "bluex": 0.006954102920723227, "oab_exams": 0.0, - "assin2_rte": 0.45209339774557167, + "assin2_rte": 0.6781400966183575, "assin2_sts": 0.0, - "faquad_nli": 0.06975747989982865, + "faquad_nli": 0.10463621984974299, "hatebr_offensive": 0.8440869615900215, "portuguese_hate_speech": 0.6785350340853187, "tweetsentbr": 0.719922008738035 @@ -51,9 +51,9 @@ "harness|enem_challenge|enem_challenge|None|3": 0.0006997900629811056, "harness|bluex|bluex|None|3": 0.006954102920723227, "harness|oab_exams|oab_exams|None|3": 0.0, - "harness|assin2_rte|assin2_rte|None|15": 0.45209339774557167, + "harness|assin2_rte|assin2_rte|None|15": 0.6781400966183575, "harness|assin2_sts|assin2_sts|None|15": 0, - "harness|faquad_nli|faquad_nli|None|15": 0.06975747989982865, + "harness|faquad_nli|faquad_nli|None|15": 0.10463621984974299, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8440869615900215, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6785350340853187, 
"harness|tweetsentbr|tweetsentbr|None|25": 0.719922008738035 @@ -125,9 +125,9 @@ "main_score": 0.0 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.45209339774557167, + "f1_macro,all": 0.6781400966183575, "acc,all": 0.5992647058823529, - "main_score": 0.45209339774557167 + "main_score": 0.6781400966183575 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0, @@ -135,9 +135,9 @@ "main_score": 0 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.06975747989982865, + "f1_macro,all": 0.10463621984974299, "acc,all": 0.08153846153846153, - "main_score": 0.06975747989982865 + "main_score": 0.10463621984974299 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { "f1_macro,all": 0.8440869615900215, diff --git a/Qwen/Qwen1.5-0.5B/raw_2024-02-24T20-35-38.476499/results.json b/Qwen/Qwen1.5-0.5B/raw_2024-02-24T20-35-38.476499/results.json index 9fdec0ec86849ed0a9346936a14ff54805238b3e..f64d05de2399f31ac7b38d61d0381fa145d9e237 100644 --- a/Qwen/Qwen1.5-0.5B/raw_2024-02-24T20-35-38.476499/results.json +++ b/Qwen/Qwen1.5-0.5B/raw_2024-02-24T20-35-38.476499/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.2669387478705755, - "acc,all": 0.5261437908496732, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.15331435595983967, - "mse,all": 2.3761356209150333, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.18915159944367177, - "acc,exam_id__USP_2019": 0.25, - "acc,exam_id__USP_2022": 0.1836734693877551, - "acc,exam_id__USP_2023": 0.11363636363636363, - "acc,exam_id__UNICAMP_2018": 0.2037037037037037, - "acc,exam_id__UNICAMP_2019": 0.16, - "acc,exam_id__USP_2020": 0.21428571428571427, - "acc,exam_id__UNICAMP_2020": 0.16363636363636364, - "acc,exam_id__UNICAMP_2023": 0.3023255813953488, - "acc,exam_id__USP_2021": 0.19230769230769232, - "acc,exam_id__UNICAMP_2022": 0.23076923076923078, - "acc,exam_id__UNICAMP_2024": 0.17777777777777778, - "acc,exam_id__USP_2018": 0.1111111111111111, - "acc,exam_id__UNICAMP_2021_2": 0.13725490196078433, - "acc,exam_id__UNICAMP_2021_1": 0.30434782608695654, - "acc,exam_id__USP_2024": 0.12195121951219512, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.19244226731980407, - "acc,exam_id__2013": 0.1574074074074074, - "acc,exam_id__2016_2": 0.1951219512195122, - "acc,exam_id__2016": 0.19008264462809918, - "acc,exam_id__2011": 0.20512820512820512, - "acc,exam_id__2017": 0.20689655172413793, - "acc,exam_id__2023": 0.26666666666666666, - "acc,exam_id__2014": 0.2018348623853211, - "acc,exam_id__2012": 0.1896551724137931, - "acc,exam_id__2009": 0.16521739130434782, - "acc,exam_id__2015": 0.14285714285714285, - "acc,exam_id__2022": 0.21052631578947367, - "acc,exam_id__2010": 0.1623931623931624 - }, - "faquad_nli": { - "f1_macro,all": 0.5253674727358938, - "acc,all": 0.7630769230769231, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.3333333333333333, - "acc,all": 0.5 - }, - "oab_exams": { - "acc,all": 0.2305239179954442, - "acc,exam_id__2016-20a": 0.3, - "acc,exam_id__2012-06": 0.2375, - "acc,exam_id__2015-18": 0.25, - "acc,exam_id__2014-14": 0.275, - "acc,exam_id__2012-07": 0.1375, - "acc,exam_id__2015-16": 0.2375, - "acc,exam_id__2011-05": 0.2375, - "acc,exam_id__2012-06a": 0.2375, - "acc,exam_id__2017-23": 0.2125, - "acc,exam_id__2016-19": 0.19230769230769232, - "acc,exam_id__2017-24": 0.225, - "acc,exam_id__2016-20": 0.225, - "acc,exam_id__2017-22": 0.25, - "acc,exam_id__2013-12": 
0.175, - "acc,exam_id__2010-02": 0.24, - "acc,exam_id__2011-03": 0.24242424242424243, - "acc,exam_id__2012-08": 0.225, - "acc,exam_id__2013-10": 0.2125, - "acc,exam_id__2016-21": 0.2125, - "acc,exam_id__2014-15": 0.21794871794871795, - "acc,exam_id__2018-25": 0.2875, - "acc,exam_id__2014-13": 0.2375, - "acc,exam_id__2010-01": 0.25882352941176473, - "acc,exam_id__2015-17": 0.24358974358974358, - "acc,exam_id__2013-11": 0.1625, - "acc,exam_id__2011-04": 0.25, - "acc,exam_id__2012-09": 0.23376623376623376, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.2743199631166436, - "acc,all": 0.699177438307873 - }, - "tweetsentbr": { - "f1_macro,all": 0.15147894988110025, - "acc,all": 0.29253731343283584, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.4004081218058632, + "acc,all": 0.5261437908496732, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.15331435595983967, + "mse,all": 2.3761356209150333, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.18915159944367177, + "acc,exam_id__USP_2019": 0.25, + "acc,exam_id__USP_2022": 0.1836734693877551, + "acc,exam_id__USP_2023": 0.11363636363636363, + "acc,exam_id__UNICAMP_2018": 0.2037037037037037, + "acc,exam_id__UNICAMP_2019": 0.16, + "acc,exam_id__USP_2020": 0.21428571428571427, + "acc,exam_id__UNICAMP_2020": 0.16363636363636364, + "acc,exam_id__UNICAMP_2023": 0.3023255813953488, + "acc,exam_id__USP_2021": 0.19230769230769232, + "acc,exam_id__UNICAMP_2022": 0.23076923076923078, + "acc,exam_id__UNICAMP_2024": 0.17777777777777778, + "acc,exam_id__USP_2018": 0.1111111111111111, + "acc,exam_id__UNICAMP_2021_2": 0.13725490196078433, + "acc,exam_id__UNICAMP_2021_1": 0.30434782608695654, + "acc,exam_id__USP_2024": 0.12195121951219512, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.19244226731980407, + "acc,exam_id__2013": 0.1574074074074074, + "acc,exam_id__2016_2": 0.1951219512195122, + "acc,exam_id__2016": 0.19008264462809918, + "acc,exam_id__2011": 0.20512820512820512, + "acc,exam_id__2017": 0.20689655172413793, + "acc,exam_id__2023": 0.26666666666666666, + "acc,exam_id__2014": 0.2018348623853211, + 
"acc,exam_id__2012": 0.1896551724137931, + "acc,exam_id__2009": 0.16521739130434782, + "acc,exam_id__2015": 0.14285714285714285, + "acc,exam_id__2022": 0.21052631578947367, + "acc,exam_id__2010": 0.1623931623931624 + }, + "faquad_nli": { + "f1_macro,all": 0.5253674727358938, + "acc,all": 0.7630769230769231, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.3333333333333333, + "acc,all": 0.5 + }, + "oab_exams": { + "acc,all": 0.2305239179954442, + "acc,exam_id__2016-20a": 0.3, + "acc,exam_id__2012-06": 0.2375, + "acc,exam_id__2015-18": 0.25, + "acc,exam_id__2014-14": 0.275, + "acc,exam_id__2012-07": 0.1375, + "acc,exam_id__2015-16": 0.2375, + "acc,exam_id__2011-05": 0.2375, + "acc,exam_id__2012-06a": 0.2375, + "acc,exam_id__2017-23": 0.2125, + "acc,exam_id__2016-19": 0.19230769230769232, + "acc,exam_id__2017-24": 0.225, + "acc,exam_id__2016-20": 0.225, + "acc,exam_id__2017-22": 0.25, + "acc,exam_id__2013-12": 0.175, + "acc,exam_id__2010-02": 0.24, + "acc,exam_id__2011-03": 0.24242424242424243, + "acc,exam_id__2012-08": 0.225, + "acc,exam_id__2013-10": 0.2125, + "acc,exam_id__2016-21": 0.2125, + "acc,exam_id__2014-15": 0.21794871794871795, + "acc,exam_id__2018-25": 0.2875, + "acc,exam_id__2014-13": 0.2375, + "acc,exam_id__2010-01": 0.25882352941176473, + "acc,exam_id__2015-17": 0.24358974358974358, + "acc,exam_id__2013-11": 0.1625, + "acc,exam_id__2011-04": 0.25, + "acc,exam_id__2012-09": 0.23376623376623376, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.4114799446749654, + "acc,all": 0.699177438307873 + }, + "tweetsentbr": { + "f1_macro,all": 0.15147894988110025, + "acc,all": 0.29253731343283584, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"fedce23ef6393499effdf4958f9b3256f299cc7d", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 1129305088, - "model_num_parameters": 463987712, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 4096, - "max_ctx_length": 4064, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1307.2818627450981, - "min_seq_length": 1288, - "max_seq_length": 1369, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1498.2818627450981, - "min_seq_length": 1479, - "max_seq_length": 1560, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1495.2378303198886, - "min_seq_length": 1175, - "max_seq_length": 2146, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1415.7809657102869, - "min_seq_length": 1185, - "max_seq_length": 2376, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1438.6876923076923, - "min_seq_length": 1394, - "max_seq_length": 1534, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1245.6992857142857, - "min_seq_length": 1227, - "max_seq_length": 1466, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "fedce23ef6393499effdf4958f9b3256f299cc7d", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 1129305088, + "model_num_parameters": 463987712, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, 
+ "model_device": "cuda:0", + "batch_size": 8, + "max_length": 4096, + "max_ctx_length": 4064, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1203.888382687927, - "min_seq_length": 974, - "max_seq_length": 1617, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1307.2818627450981, + "min_seq_length": 1288, + "max_seq_length": 1369, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1498.2818627450981, + "min_seq_length": 1479, + "max_seq_length": 1560, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1495.2378303198886, + "min_seq_length": 1175, + "max_seq_length": 2146, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1415.7809657102869, + "min_seq_length": 1185, + "max_seq_length": 2376, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1438.6876923076923, + "min_seq_length": 1394, + "max_seq_length": 1534, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1245.6992857142857, + "min_seq_length": 1227, + "max_seq_length": 1466, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1203.888382687927, + "min_seq_length": 974, + "max_seq_length": 1617, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1688.62044653349, + "min_seq_length": 1658, + "max_seq_length": 1722, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + 
"padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1539.0567164179104, + "min_seq_length": 1521, + "max_seq_length": 1587, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1688.62044653349, - "min_seq_length": 1658, - "max_seq_length": 1722, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Qwen/Qwen1.5-0.5B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1539.0567164179104, - "min_seq_length": 1521, - "max_seq_length": 1587, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Qwen/Qwen1.5-0.5B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "804df15" + "git_hash": "804df15" } \ No newline at end of file diff --git a/Qwen/Qwen1.5-0.5B/results_2024-02-24T20-35-38.476499.json b/Qwen/Qwen1.5-0.5B/results_2024-02-24T20-35-38.476499.json index 3086c0e4b84de0cc120c8c5d50e70fba5449084a..7f37fe0406934ab82986e7bb5bca500a07fcb62d 100644 --- a/Qwen/Qwen1.5-0.5B/results_2024-02-24T20-35-38.476499.json +++ b/Qwen/Qwen1.5-0.5B/results_2024-02-24T20-35-38.476499.json @@ -34,28 +34,28 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.25743006751736736, - "all_grouped_npm": -0.139537678958747, + "all_grouped_average": 0.2874999959055462, + "all_grouped_npm": -0.08062638247785732, "all_grouped": { "enem_challenge": 0.19244226731980407, "bluex": 0.18915159944367177, "oab_exams": 0.2305239179954442, - "assin2_rte": 0.2669387478705755, + "assin2_rte": 0.4004081218058632, "assin2_sts": 0.15331435595983967, "faquad_nli": 0.5253674727358938, "hatebr_offensive": 0.3333333333333333, - "portuguese_hate_speech": 0.2743199631166436, + "portuguese_hate_speech": 0.4114799446749654, "tweetsentbr": 0.15147894988110025 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.19244226731980407, "harness|bluex|bluex|None|3": 0.18915159944367177, "harness|oab_exams|oab_exams|None|3": 0.2305239179954442, - "harness|assin2_rte|assin2_rte|None|15": 0.2669387478705755, + "harness|assin2_rte|assin2_rte|None|15": 0.4004081218058632, "harness|assin2_sts|assin2_sts|None|15": 0.15331435595983967, "harness|faquad_nli|faquad_nli|None|15": 0.5253674727358938, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.3333333333333333, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.2743199631166436, + 
"harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.4114799446749654, "harness|tweetsentbr|tweetsentbr|None|25": 0.15147894988110025 }, "harness|enem_challenge|enem_challenge|None|3": { @@ -125,9 +125,9 @@ "main_score": 0.2305239179954442 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.2669387478705755, + "f1_macro,all": 0.4004081218058632, "acc,all": 0.5261437908496732, - "main_score": 0.2669387478705755 + "main_score": 0.4004081218058632 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.15331435595983967, @@ -145,9 +145,9 @@ "main_score": 0.3333333333333333 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.2743199631166436, + "f1_macro,all": 0.4114799446749654, "acc,all": 0.699177438307873, - "main_score": 0.2743199631166436 + "main_score": 0.4114799446749654 }, "harness|tweetsentbr|tweetsentbr|None|25": { "f1_macro,all": 0.15147894988110025, diff --git a/Qwen/Qwen1.5-1.8B/raw_2024-02-22T17-34-27.100441/results.json b/Qwen/Qwen1.5-1.8B/raw_2024-02-22T17-34-27.100441/results.json index ca8fedd8478fc6b519d84fbe45b3b604842e7e7c..406b4088b3de9558c782d9ae02381065b212f4a3 100644 --- a/Qwen/Qwen1.5-1.8B/raw_2024-02-22T17-34-27.100441/results.json +++ b/Qwen/Qwen1.5-1.8B/raw_2024-02-22T17-34-27.100441/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.42057189695322883, - "acc,all": 0.5420751633986928, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.32199828051873347, - "mse,all": 2.7759530610702616, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.2517385257301808, - "acc,exam_id__USP_2019": 0.3, - "acc,exam_id__USP_2021": 0.2692307692307692, - "acc,exam_id__UNICAMP_2023": 0.3953488372093023, - "acc,exam_id__USP_2023": 0.1590909090909091, - "acc,exam_id__UNICAMP_2020": 0.23636363636363636, - "acc,exam_id__USP_2024": 0.17073170731707318, - "acc,exam_id__UNICAMP_2021_1": 0.34782608695652173, - "acc,exam_id__UNICAMP_2021_2": 0.17647058823529413, - "acc,exam_id__UNICAMP_2018": 0.35185185185185186, - "acc,exam_id__USP_2018": 0.1111111111111111, - "acc,exam_id__USP_2020": 0.25, - "acc,exam_id__UNICAMP_2022": 0.28205128205128205, - "acc,exam_id__USP_2022": 0.22448979591836735, - "acc,exam_id__UNICAMP_2024": 0.35555555555555557, - "acc,exam_id__UNICAMP_2019": 0.18, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.27711686494051785, - "acc,exam_id__2016": 0.2066115702479339, - "acc,exam_id__2012": 0.2413793103448276, - "acc,exam_id__2013": 0.24074074074074073, - "acc,exam_id__2010": 0.24786324786324787, - "acc,exam_id__2017": 0.29310344827586204, - "acc,exam_id__2022": 0.3308270676691729, - "acc,exam_id__2009": 0.2608695652173913, - "acc,exam_id__2011": 0.23076923076923078, - "acc,exam_id__2023": 0.362962962962963, - "acc,exam_id__2014": 0.3119266055045872, - "acc,exam_id__2015": 0.25210084033613445, - "acc,exam_id__2016_2": 0.3252032520325203 - }, - "faquad_nli": { - "f1_macro,all": 0.19973603600941603, - "acc,all": 0.23076923076923078, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.4317428166587821, - "acc,all": 0.4907142857142857 - }, - "oab_exams": { - "acc,all": 0.28792710706150343, - "acc,exam_id__2011-03": 0.35353535353535354, - "acc,exam_id__2014-15": 0.28205128205128205, - "acc,exam_id__2011-05": 0.275, - "acc,exam_id__2014-14": 0.275, - "acc,exam_id__2018-25": 0.3125, - "acc,exam_id__2015-17": 0.32051282051282054, - "acc,exam_id__2015-18": 0.3, - 
"acc,exam_id__2012-07": 0.3375, - "acc,exam_id__2014-13": 0.3, - "acc,exam_id__2017-24": 0.275, - "acc,exam_id__2012-06a": 0.3875, - "acc,exam_id__2013-11": 0.225, - "acc,exam_id__2015-16": 0.25, - "acc,exam_id__2017-22": 0.275, - "acc,exam_id__2010-02": 0.32, - "acc,exam_id__2016-21": 0.2875, - "acc,exam_id__2011-04": 0.3125, - "acc,exam_id__2016-19": 0.32051282051282054, - "acc,exam_id__2013-12": 0.1625, - "acc,exam_id__2017-23": 0.275, - "acc,exam_id__2013-10": 0.225, - "acc,exam_id__2012-09": 0.18181818181818182, - "acc,exam_id__2010-01": 0.29411764705882354, - "acc,exam_id__2012-06": 0.3125, - "acc,exam_id__2016-20": 0.3125, - "acc,exam_id__2016-20a": 0.3125, - "acc,exam_id__2012-08": 0.2625, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.31879746109770185, - "acc,all": 0.5088131609870741 - }, - "tweetsentbr": { - "f1_macro,all": 0.20266417241880366, - "acc,all": 0.47512437810945274, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.42057189695322883, + "acc,all": 0.5420751633986928, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.32199828051873347, + "mse,all": 2.7759530610702616, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.2517385257301808, + "acc,exam_id__USP_2019": 0.3, + "acc,exam_id__USP_2021": 0.2692307692307692, + "acc,exam_id__UNICAMP_2023": 0.3953488372093023, + "acc,exam_id__USP_2023": 0.1590909090909091, + "acc,exam_id__UNICAMP_2020": 0.23636363636363636, + "acc,exam_id__USP_2024": 0.17073170731707318, + "acc,exam_id__UNICAMP_2021_1": 0.34782608695652173, + "acc,exam_id__UNICAMP_2021_2": 0.17647058823529413, + "acc,exam_id__UNICAMP_2018": 0.35185185185185186, + "acc,exam_id__USP_2018": 0.1111111111111111, + "acc,exam_id__USP_2020": 0.25, + "acc,exam_id__UNICAMP_2022": 0.28205128205128205, + "acc,exam_id__USP_2022": 0.22448979591836735, + "acc,exam_id__UNICAMP_2024": 0.35555555555555557, + "acc,exam_id__UNICAMP_2019": 0.18, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.27711686494051785, + "acc,exam_id__2016": 0.2066115702479339, + "acc,exam_id__2012": 0.2413793103448276, + "acc,exam_id__2013": 0.24074074074074073, + 
"acc,exam_id__2010": 0.24786324786324787, + "acc,exam_id__2017": 0.29310344827586204, + "acc,exam_id__2022": 0.3308270676691729, + "acc,exam_id__2009": 0.2608695652173913, + "acc,exam_id__2011": 0.23076923076923078, + "acc,exam_id__2023": 0.362962962962963, + "acc,exam_id__2014": 0.3119266055045872, + "acc,exam_id__2015": 0.25210084033613445, + "acc,exam_id__2016_2": 0.3252032520325203 + }, + "faquad_nli": { + "f1_macro,all": 0.19973603600941603, + "acc,all": 0.23076923076923078, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.4317428166587821, + "acc,all": 0.4907142857142857 + }, + "oab_exams": { + "acc,all": 0.28792710706150343, + "acc,exam_id__2011-03": 0.35353535353535354, + "acc,exam_id__2014-15": 0.28205128205128205, + "acc,exam_id__2011-05": 0.275, + "acc,exam_id__2014-14": 0.275, + "acc,exam_id__2018-25": 0.3125, + "acc,exam_id__2015-17": 0.32051282051282054, + "acc,exam_id__2015-18": 0.3, + "acc,exam_id__2012-07": 0.3375, + "acc,exam_id__2014-13": 0.3, + "acc,exam_id__2017-24": 0.275, + "acc,exam_id__2012-06a": 0.3875, + "acc,exam_id__2013-11": 0.225, + "acc,exam_id__2015-16": 0.25, + "acc,exam_id__2017-22": 0.275, + "acc,exam_id__2010-02": 0.32, + "acc,exam_id__2016-21": 0.2875, + "acc,exam_id__2011-04": 0.3125, + "acc,exam_id__2016-19": 0.32051282051282054, + "acc,exam_id__2013-12": 0.1625, + "acc,exam_id__2017-23": 0.275, + "acc,exam_id__2013-10": 0.225, + "acc,exam_id__2012-09": 0.18181818181818182, + "acc,exam_id__2010-01": 0.29411764705882354, + "acc,exam_id__2012-06": 0.3125, + "acc,exam_id__2016-20": 0.3125, + "acc,exam_id__2016-20a": 0.3125, + "acc,exam_id__2012-08": 0.2625, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.47819619164655275, + "acc,all": 0.5088131609870741 + }, + "tweetsentbr": { + "f1_macro,all": 0.2702188965584049, + "acc,all": 0.47512437810945274, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"921f88e4573192da5a10c809ed188603ea0f3937", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 4076316672, - "model_num_parameters": 1836828672, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 4096, - "max_ctx_length": 4064, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1307.2818627450981, - "min_seq_length": 1288, - "max_seq_length": 1369, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1498.2818627450981, - "min_seq_length": 1479, - "max_seq_length": 1560, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1495.2378303198886, - "min_seq_length": 1175, - "max_seq_length": 2146, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1415.7809657102869, - "min_seq_length": 1185, - "max_seq_length": 2376, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1438.6876923076923, - "min_seq_length": 1394, - "max_seq_length": 1534, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1245.6992857142857, - "min_seq_length": 1227, - "max_seq_length": 1466, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "921f88e4573192da5a10c809ed188603ea0f3937", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 4076316672, + "model_num_parameters": 1836828672, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": 
null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 4096, + "max_ctx_length": 4064, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1203.888382687927, - "min_seq_length": 974, - "max_seq_length": 1617, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1307.2818627450981, + "min_seq_length": 1288, + "max_seq_length": 1369, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1498.2818627450981, + "min_seq_length": 1479, + "max_seq_length": 1560, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1495.2378303198886, + "min_seq_length": 1175, + "max_seq_length": 2146, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1415.7809657102869, + "min_seq_length": 1185, + "max_seq_length": 2376, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1438.6876923076923, + "min_seq_length": 1394, + "max_seq_length": 1534, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1245.6992857142857, + "min_seq_length": 1227, + "max_seq_length": 1466, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1203.888382687927, + "min_seq_length": 974, + "max_seq_length": 1617, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1688.62044653349, + "min_seq_length": 1658, + "max_seq_length": 1722, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 
2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1539.0567164179104, + "min_seq_length": 1521, + "max_seq_length": 1587, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1688.62044653349, - "min_seq_length": 1658, - "max_seq_length": 1722, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Qwen/Qwen1.5-1.8B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1539.0567164179104, - "min_seq_length": 1521, - "max_seq_length": 1587, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Qwen/Qwen1.5-1.8B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "804df15" + "git_hash": "804df15" } \ No newline at end of file diff --git a/Qwen/Qwen1.5-1.8B/results_2024-02-22T17-34-27.100441.json b/Qwen/Qwen1.5-1.8B/results_2024-02-22T17-34-27.100441.json index a96f0f1bc4c80c40866f11b3c05a3dde063e6625..7ce69167928c03288f7b985de6bd097928d22ad9 100644 --- a/Qwen/Qwen1.5-1.8B/results_2024-02-22T17-34-27.100441.json +++ b/Qwen/Qwen1.5-1.8B/results_2024-02-22T17-34-27.100441.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.3013659068209853, - "all_grouped_npm": -0.084109000068956, + "all_grouped_average": 0.3265829573419245, + "all_grouped_npm": -0.038945053101649, "all_grouped": { "enem_challenge": 0.27711686494051785, "bluex": 0.2517385257301808, @@ -44,8 +44,8 @@ "assin2_sts": 0.32199828051873347, "faquad_nli": 0.19973603600941603, "hatebr_offensive": 0.4317428166587821, - "portuguese_hate_speech": 0.31879746109770185, - "tweetsentbr": 0.20266417241880366 + "portuguese_hate_speech": 0.47819619164655275, + "tweetsentbr": 0.2702188965584049 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.27711686494051785, @@ -55,8 +55,8 @@ "harness|assin2_sts|assin2_sts|None|15": 0.32199828051873347, "harness|faquad_nli|faquad_nli|None|15": 0.19973603600941603, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.4317428166587821, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.31879746109770185, - "harness|tweetsentbr|tweetsentbr|None|25": 0.20266417241880366 + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.47819619164655275, + "harness|tweetsentbr|tweetsentbr|None|25": 0.2702188965584049 }, "harness|enem_challenge|enem_challenge|None|3": { 
"acc,all": 0.27711686494051785, @@ -145,14 +145,14 @@ "main_score": 0.4317428166587821 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.31879746109770185, + "f1_macro,all": 0.47819619164655275, "acc,all": 0.5088131609870741, - "main_score": 0.31879746109770185 + "main_score": 0.47819619164655275 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.20266417241880366, + "f1_macro,all": 0.2702188965584049, "acc,all": 0.47512437810945274, - "main_score": 0.20266417241880366 + "main_score": 0.2702188965584049 } }, "config_tasks": { diff --git a/Qwen/Qwen1.5-110B-Chat/raw_2024-05-24T17-17-26.943633/results.json b/Qwen/Qwen1.5-110B-Chat/raw_2024-05-24T17-17-26.943633/results.json index 14944fee96193bafba3d113b45c398c0944d5abe..a894d7b920d7af3d55990e293343e742a3decd4f 100644 --- a/Qwen/Qwen1.5-110B-Chat/raw_2024-05-24T17-17-26.943633/results.json +++ b/Qwen/Qwen1.5-110B-Chat/raw_2024-05-24T17-17-26.943633/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9238241568808003, - "acc,all": 0.9240196078431373, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7616643629663564, - "mse,all": 0.5825898692810457, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.7148817802503477, - "acc,exam_id__USP_2020": 0.7142857142857143, - "acc,exam_id__UNICAMP_2020": 0.6909090909090909, - "acc,exam_id__UNICAMP_2021_1": 0.6086956521739131, - "acc,exam_id__UNICAMP_2023": 0.7674418604651163, - "acc,exam_id__USP_2018": 0.6296296296296297, - "acc,exam_id__USP_2019": 0.7, - "acc,exam_id__USP_2023": 0.7272727272727273, - "acc,exam_id__USP_2024": 0.9024390243902439, - "acc,exam_id__UNICAMP_2018": 0.6851851851851852, - "acc,exam_id__UNICAMP_2022": 0.7435897435897436, - "acc,exam_id__UNICAMP_2024": 0.7111111111111111, - "acc,exam_id__UNICAMP_2019": 0.74, - "acc,exam_id__UNICAMP_2021_2": 0.7254901960784313, - "acc,exam_id__USP_2022": 0.7346938775510204, - "acc,exam_id__USP_2021": 0.6923076923076923, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.7795661301609517, - "acc,exam_id__2012": 0.7844827586206896, - "acc,exam_id__2016_2": 0.7804878048780488, - "acc,exam_id__2022": 0.7368421052631579, - "acc,exam_id__2015": 0.7310924369747899, - "acc,exam_id__2023": 0.837037037037037, - "acc,exam_id__2009": 0.7391304347826086, - "acc,exam_id__2016": 0.7520661157024794, - "acc,exam_id__2011": 0.8632478632478633, - "acc,exam_id__2017": 0.7844827586206896, - "acc,exam_id__2013": 0.7592592592592593, - "acc,exam_id__2010": 0.811965811965812, - "acc,exam_id__2014": 0.7706422018348624 - }, - "faquad_nli": { - "f1_macro,all": 0.8072566392674566, - "acc,all": 0.8461538461538461, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8397521389256764, - "acc,all": 0.8421428571428572 - }, - "oab_exams": { - "acc,all": 0.5708428246013667, - "acc,exam_id__2013-10": 0.7, - "acc,exam_id__2015-16": 0.55, - "acc,exam_id__2012-09": 0.5454545454545454, - "acc,exam_id__2014-14": 0.625, - "acc,exam_id__2017-24": 0.475, - "acc,exam_id__2011-05": 0.5625, - "acc,exam_id__2015-18": 0.6875, - "acc,exam_id__2010-02": 0.62, - "acc,exam_id__2011-04": 0.475, - "acc,exam_id__2010-01": 0.4470588235294118, - "acc,exam_id__2013-11": 0.575, - "acc,exam_id__2012-07": 0.5375, - "acc,exam_id__2012-06a": 0.625, - "acc,exam_id__2016-20": 0.5625, - "acc,exam_id__2014-15": 0.6794871794871795, - "acc,exam_id__2016-21": 0.45, - "acc,exam_id__2014-13": 0.5375, - 
"acc,exam_id__2011-03": 0.43434343434343436, - "acc,exam_id__2016-19": 0.5512820512820513, - "acc,exam_id__2013-12": 0.6625, - "acc,exam_id__2017-23": 0.525, - "acc,exam_id__2012-06": 0.5875, - "acc,exam_id__2016-20a": 0.5375, - "acc,exam_id__2017-22": 0.625, - "acc,exam_id__2012-08": 0.6, - "acc,exam_id__2015-17": 0.6794871794871795, - "acc,exam_id__2018-25": 0.5875, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6261766055657461, - "acc,all": 0.6321974148061105 - }, - "tweetsentbr": { - "f1_macro,all": 0.5225783366897532, - "acc,all": 0.7472636815920398, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9238241568808003, + "acc,all": 0.9240196078431373, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7616643629663564, + "mse,all": 0.5825898692810457, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.7148817802503477, + "acc,exam_id__USP_2020": 0.7142857142857143, + "acc,exam_id__UNICAMP_2020": 0.6909090909090909, + "acc,exam_id__UNICAMP_2021_1": 0.6086956521739131, + "acc,exam_id__UNICAMP_2023": 0.7674418604651163, + "acc,exam_id__USP_2018": 0.6296296296296297, + "acc,exam_id__USP_2019": 0.7, + "acc,exam_id__USP_2023": 0.7272727272727273, + "acc,exam_id__USP_2024": 0.9024390243902439, + "acc,exam_id__UNICAMP_2018": 0.6851851851851852, + "acc,exam_id__UNICAMP_2022": 0.7435897435897436, + "acc,exam_id__UNICAMP_2024": 0.7111111111111111, + "acc,exam_id__UNICAMP_2019": 0.74, + "acc,exam_id__UNICAMP_2021_2": 0.7254901960784313, + "acc,exam_id__USP_2022": 0.7346938775510204, + "acc,exam_id__USP_2021": 0.6923076923076923, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.7795661301609517, + "acc,exam_id__2012": 0.7844827586206896, + "acc,exam_id__2016_2": 0.7804878048780488, + "acc,exam_id__2022": 0.7368421052631579, + "acc,exam_id__2015": 0.7310924369747899, + "acc,exam_id__2023": 0.837037037037037, + "acc,exam_id__2009": 0.7391304347826086, + "acc,exam_id__2016": 0.7520661157024794, + "acc,exam_id__2011": 0.8632478632478633, + "acc,exam_id__2017": 0.7844827586206896, + "acc,exam_id__2013": 0.7592592592592593, + "acc,exam_id__2010": 0.811965811965812, 
+ "acc,exam_id__2014": 0.7706422018348624 + }, + "faquad_nli": { + "f1_macro,all": 0.8072566392674566, + "acc,all": 0.8461538461538461, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8397521389256764, + "acc,all": 0.8421428571428572 + }, + "oab_exams": { + "acc,all": 0.5708428246013667, + "acc,exam_id__2013-10": 0.7, + "acc,exam_id__2015-16": 0.55, + "acc,exam_id__2012-09": 0.5454545454545454, + "acc,exam_id__2014-14": 0.625, + "acc,exam_id__2017-24": 0.475, + "acc,exam_id__2011-05": 0.5625, + "acc,exam_id__2015-18": 0.6875, + "acc,exam_id__2010-02": 0.62, + "acc,exam_id__2011-04": 0.475, + "acc,exam_id__2010-01": 0.4470588235294118, + "acc,exam_id__2013-11": 0.575, + "acc,exam_id__2012-07": 0.5375, + "acc,exam_id__2012-06a": 0.625, + "acc,exam_id__2016-20": 0.5625, + "acc,exam_id__2014-15": 0.6794871794871795, + "acc,exam_id__2016-21": 0.45, + "acc,exam_id__2014-13": 0.5375, + "acc,exam_id__2011-03": 0.43434343434343436, + "acc,exam_id__2016-19": 0.5512820512820513, + "acc,exam_id__2013-12": 0.6625, + "acc,exam_id__2017-23": 0.525, + "acc,exam_id__2012-06": 0.5875, + "acc,exam_id__2016-20a": 0.5375, + "acc,exam_id__2017-22": 0.625, + "acc,exam_id__2012-08": 0.6, + "acc,exam_id__2015-17": 0.6794871794871795, + "acc,exam_id__2018-25": 0.5875, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6261766055657461, + "acc,all": 0.6321974148061105 + }, + "tweetsentbr": { + "f1_macro,all": 0.6967711155863375, + "acc,all": 0.7472636815920398, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 4, - "accelerate_num_process": null, - "model_sha": "85f86cec25901f2dbd870a86e06756903c9a876a", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 223762026496, - "model_num_parameters": 111209914368, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 1, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1307.2818627450981, - "min_seq_length": 1288, - "max_seq_length": 1369, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1498.2818627450981, - "min_seq_length": 1479, - "max_seq_length": 1560, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1495.2378303198886, - "min_seq_length": 1175, - "max_seq_length": 2146, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - 
"mean_seq_length": 1415.7809657102869, - "min_seq_length": 1185, - "max_seq_length": 2376, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1438.6876923076923, - "min_seq_length": 1394, - "max_seq_length": 1534, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 4, + "accelerate_num_process": null, + "model_sha": "85f86cec25901f2dbd870a86e06756903c9a876a", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 223762026496, + "model_num_parameters": 111209914368, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 1, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1245.6992857142857, - "min_seq_length": 1227, - "max_seq_length": 1466, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1203.888382687927, - "min_seq_length": 974, - "max_seq_length": 1617, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1307.2818627450981, + "min_seq_length": 1288, + "max_seq_length": 1369, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1498.2818627450981, + "min_seq_length": 1479, + "max_seq_length": 1560, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1495.2378303198886, + "min_seq_length": 1175, + "max_seq_length": 2146, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1415.7809657102869, + "min_seq_length": 1185, + "max_seq_length": 2376, + 
"max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1438.6876923076923, + "min_seq_length": 1394, + "max_seq_length": 1534, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1245.6992857142857, + "min_seq_length": 1227, + "max_seq_length": 1466, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1203.888382687927, + "min_seq_length": 974, + "max_seq_length": 1617, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1688.62044653349, + "min_seq_length": 1658, + "max_seq_length": 1722, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1539.0567164179104, + "min_seq_length": 1521, + "max_seq_length": 1587, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1688.62044653349, - "min_seq_length": 1658, - "max_seq_length": 1722, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Qwen/Qwen1.5-110B-Chat,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1539.0567164179104, - "min_seq_length": 1521, - "max_seq_length": 1587, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Qwen/Qwen1.5-110B-Chat,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - 
}, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/Qwen/Qwen1.5-110B-Chat/raw_2024-05-26T01-01-39.902132/results.json b/Qwen/Qwen1.5-110B-Chat/raw_2024-05-26T01-01-39.902132/results.json index 2d3e100e8d479fdfae3b610e526d0588a890258b..8b06f25d3687c68589c8dda5fe0e42fa59d0de0f 100644 --- a/Qwen/Qwen1.5-110B-Chat/raw_2024-05-26T01-01-39.902132/results.json +++ b/Qwen/Qwen1.5-110B-Chat/raw_2024-05-26T01-01-39.902132/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9200225041420388, - "acc,all": 0.9203431372549019, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7541446204629966, - "mse,all": 0.5800040849673203, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.717663421418637, - "acc,exam_id__UNICAMP_2018": 0.6851851851851852, - "acc,exam_id__UNICAMP_2023": 0.7441860465116279, - "acc,exam_id__USP_2023": 0.7954545454545454, - "acc,exam_id__UNICAMP_2024": 0.7333333333333333, - "acc,exam_id__USP_2024": 0.8780487804878049, - "acc,exam_id__UNICAMP_2021_1": 0.5869565217391305, - "acc,exam_id__USP_2020": 0.7321428571428571, - "acc,exam_id__UNICAMP_2020": 0.7090909090909091, - "acc,exam_id__UNICAMP_2022": 0.7435897435897436, - "acc,exam_id__UNICAMP_2019": 0.78, - "acc,exam_id__UNICAMP_2021_2": 0.6862745098039216, - "acc,exam_id__USP_2018": 0.5925925925925926, - "acc,exam_id__USP_2021": 0.75, - "acc,exam_id__USP_2019": 0.65, - "acc,exam_id__USP_2022": 0.7346938775510204, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.7718684394681595, - "acc,exam_id__2011": 0.8632478632478633, - "acc,exam_id__2017": 0.7931034482758621, - "acc,exam_id__2015": 0.7478991596638656, - "acc,exam_id__2016": 0.743801652892562, - "acc,exam_id__2016_2": 0.7560975609756098, - "acc,exam_id__2009": 0.7478260869565218, - "acc,exam_id__2012": 0.75, - "acc,exam_id__2010": 0.8034188034188035, - "acc,exam_id__2013": 0.75, - "acc,exam_id__2014": 0.7706422018348624, - "acc,exam_id__2022": 0.7218045112781954, - "acc,exam_id__2023": 0.8148148148148148 - }, - "faquad_nli": { - "f1_macro,all": 0.7928832962550982, - "acc,all": 0.8307692307692308, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8489571799283399, - "acc,all": 0.8507142857142858 - }, - "oab_exams": { - "acc,all": 0.5603644646924829, - "acc,exam_id__2011-03": 0.494949494949495, - "acc,exam_id__2014-13": 0.5, - "acc,exam_id__2013-10": 0.65, - "acc,exam_id__2017-24": 0.4875, - "acc,exam_id__2017-22": 0.6125, - "acc,exam_id__2012-06a": 0.625, - "acc,exam_id__2016-20a": 0.5125, - "acc,exam_id__2012-09": 0.5454545454545454, - "acc,exam_id__2015-16": 0.525, - "acc,exam_id__2011-04": 0.475, - "acc,exam_id__2012-07": 0.525, - "acc,exam_id__2014-14": 0.625, - "acc,exam_id__2014-15": 0.6538461538461539, - "acc,exam_id__2010-02": 0.62, - "acc,exam_id__2015-18": 0.675, - "acc,exam_id__2016-19": 0.5512820512820513, - "acc,exam_id__2012-06": 0.5625, - "acc,exam_id__2013-12": 0.6125, - "acc,exam_id__2011-05": 0.575, - "acc,exam_id__2017-23": 0.525, - "acc,exam_id__2013-11": 0.55, - "acc,exam_id__2016-20": 0.5125, - "acc,exam_id__2016-21": 0.4375, - "acc,exam_id__2018-25": 0.5625, - "acc,exam_id__2010-01": 0.4235294117647059, - "acc,exam_id__2012-08": 0.625, - "acc,exam_id__2015-17": 0.6794871794871795, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6461388323150035, - "acc,all": 0.654524089306698 - }, - "tweetsentbr": { 
- "f1_macro,all": 0.5134846646789802, - "acc,all": 0.7417910447761195, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9200225041420388, + "acc,all": 0.9203431372549019, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7541446204629966, + "mse,all": 0.5800040849673203, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.717663421418637, + "acc,exam_id__UNICAMP_2018": 0.6851851851851852, + "acc,exam_id__UNICAMP_2023": 0.7441860465116279, + "acc,exam_id__USP_2023": 0.7954545454545454, + "acc,exam_id__UNICAMP_2024": 0.7333333333333333, + "acc,exam_id__USP_2024": 0.8780487804878049, + "acc,exam_id__UNICAMP_2021_1": 0.5869565217391305, + "acc,exam_id__USP_2020": 0.7321428571428571, + "acc,exam_id__UNICAMP_2020": 0.7090909090909091, + "acc,exam_id__UNICAMP_2022": 0.7435897435897436, + "acc,exam_id__UNICAMP_2019": 0.78, + "acc,exam_id__UNICAMP_2021_2": 0.6862745098039216, + "acc,exam_id__USP_2018": 0.5925925925925926, + "acc,exam_id__USP_2021": 0.75, + "acc,exam_id__USP_2019": 0.65, + "acc,exam_id__USP_2022": 0.7346938775510204, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.7718684394681595, + "acc,exam_id__2011": 0.8632478632478633, + "acc,exam_id__2017": 0.7931034482758621, + "acc,exam_id__2015": 0.7478991596638656, + "acc,exam_id__2016": 0.743801652892562, + "acc,exam_id__2016_2": 0.7560975609756098, + "acc,exam_id__2009": 0.7478260869565218, + "acc,exam_id__2012": 0.75, + "acc,exam_id__2010": 0.8034188034188035, + "acc,exam_id__2013": 0.75, + "acc,exam_id__2014": 0.7706422018348624, + "acc,exam_id__2022": 0.7218045112781954, + "acc,exam_id__2023": 0.8148148148148148 + }, + "faquad_nli": { + "f1_macro,all": 0.7928832962550982, + "acc,all": 0.8307692307692308, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8489571799283399, + "acc,all": 0.8507142857142858 + }, + "oab_exams": { + "acc,all": 0.5603644646924829, + "acc,exam_id__2011-03": 0.494949494949495, + "acc,exam_id__2014-13": 0.5, + "acc,exam_id__2013-10": 0.65, + "acc,exam_id__2017-24": 0.4875, + "acc,exam_id__2017-22": 0.6125, + "acc,exam_id__2012-06a": 0.625, + "acc,exam_id__2016-20a": 0.5125, + 
"acc,exam_id__2012-09": 0.5454545454545454, + "acc,exam_id__2015-16": 0.525, + "acc,exam_id__2011-04": 0.475, + "acc,exam_id__2012-07": 0.525, + "acc,exam_id__2014-14": 0.625, + "acc,exam_id__2014-15": 0.6538461538461539, + "acc,exam_id__2010-02": 0.62, + "acc,exam_id__2015-18": 0.675, + "acc,exam_id__2016-19": 0.5512820512820513, + "acc,exam_id__2012-06": 0.5625, + "acc,exam_id__2013-12": 0.6125, + "acc,exam_id__2011-05": 0.575, + "acc,exam_id__2017-23": 0.525, + "acc,exam_id__2013-11": 0.55, + "acc,exam_id__2016-20": 0.5125, + "acc,exam_id__2016-21": 0.4375, + "acc,exam_id__2018-25": 0.5625, + "acc,exam_id__2010-01": 0.4235294117647059, + "acc,exam_id__2012-08": 0.625, + "acc,exam_id__2015-17": 0.6794871794871795, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6461388323150035, + "acc,all": 0.654524089306698 + }, + "tweetsentbr": { + "f1_macro,all": 0.6846462195719735, + "acc,all": 0.7417910447761195, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": 
"find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - 
"regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. 
Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "85f86cec25901f2dbd870a86e06756903c9a876a", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 60687486976, - "model_num_parameters": 111209914368, - "model_is_loaded_in_4bit": true, - "model_is_loaded_in_8bit": null, - "model_is_quantized": true, - "model_device": "cuda:0", - "batch_size": 2, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1307.2818627450981, - "min_seq_length": 1288, - "max_seq_length": 1369, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1498.2818627450981, - "min_seq_length": 1479, - "max_seq_length": 1560, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1495.2378303198886, - "min_seq_length": 1175, - "max_seq_length": 2146, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1415.7809657102869, - "min_seq_length": 1185, - "max_seq_length": 2376, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1438.6876923076923, - "min_seq_length": 1394, - "max_seq_length": 1534, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "85f86cec25901f2dbd870a86e06756903c9a876a", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 60687486976, + "model_num_parameters": 111209914368, + "model_is_loaded_in_4bit": true, + "model_is_loaded_in_8bit": null, + "model_is_quantized": true, + "model_device": "cuda:0", + "batch_size": 2, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1245.6992857142857, - "min_seq_length": 1227, - "max_seq_length": 1466, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1203.888382687927, - "min_seq_length": 974, - "max_seq_length": 1617, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1307.2818627450981, + "min_seq_length": 1288, + "max_seq_length": 1369, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1498.2818627450981, + "min_seq_length": 1479, + "max_seq_length": 1560, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1495.2378303198886, + "min_seq_length": 1175, + "max_seq_length": 2146, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1415.7809657102869, + "min_seq_length": 1185, + "max_seq_length": 2376, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1438.6876923076923, + "min_seq_length": 1394, + "max_seq_length": 1534, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1245.6992857142857, + "min_seq_length": 1227, + "max_seq_length": 1466, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1203.888382687927, + "min_seq_length": 974, + "max_seq_length": 1617, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1688.62044653349, + "min_seq_length": 1658, + "max_seq_length": 1722, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1539.0567164179104, + "min_seq_length": 1521, + "max_seq_length": 1587, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1688.62044653349, - "min_seq_length": 1658, - "max_seq_length": 1722, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Qwen/Qwen1.5-110B-Chat,load_in_4bit=True,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1539.0567164179104, - "min_seq_length": 1521, - "max_seq_length": 1587, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Qwen/Qwen1.5-110B-Chat,load_in_4bit=True,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "51e0e5e" 
+ "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/Qwen/Qwen1.5-110B-Chat/results_2024-05-24T17-17-26.943633.json b/Qwen/Qwen1.5-110B-Chat/results_2024-05-24T17-17-26.943633.json index 8c38b14b25318e3cfc2bd1c2df86d246423dee46..a9fdf54c7bb7367176d9305de4a6e5c1d3286496 100644 --- a/Qwen/Qwen1.5-110B-Chat/results_2024-05-24T17-17-26.943633.json +++ b/Qwen/Qwen1.5-110B-Chat/results_2024-05-24T17-17-26.943633.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.7273936639231616, - "all_grouped_npm": 0.5878780785724127, + "all_grouped_average": 0.7467484171338933, + "all_grouped_npm": 0.6166797946598107, "all_grouped": { "enem_challenge": 0.7795661301609517, "bluex": 0.7148817802503477, @@ -45,7 +45,7 @@ "faquad_nli": 0.8072566392674566, "hatebr_offensive": 0.8397521389256764, "portuguese_hate_speech": 0.6261766055657461, - "tweetsentbr": 0.5225783366897532 + "tweetsentbr": 0.6967711155863375 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.7795661301609517, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.8072566392674566, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8397521389256764, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6261766055657461, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5225783366897532 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6967711155863375 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.7795661301609517, @@ -150,9 +150,9 @@ "main_score": 0.6261766055657461 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5225783366897532, + "f1_macro,all": 0.6967711155863375, "acc,all": 0.7472636815920398, - "main_score": 0.5225783366897532 + "main_score": 0.6967711155863375 } }, "config_tasks": { diff --git a/Qwen/Qwen1.5-110B-Chat/results_2024-05-26T01-01-39.902132.json b/Qwen/Qwen1.5-110B-Chat/results_2024-05-26T01-01-39.902132.json index 7b2c4acc41c66e52eaece0bb0ae505042a511dc2..ea929f97ceeaf8f766ff34e5993bff71cb3154b1 100644 --- a/Qwen/Qwen1.5-110B-Chat/results_2024-05-26T01-01-39.902132.json +++ b/Qwen/Qwen1.5-110B-Chat/results_2024-05-26T01-01-39.902132.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.7250586025957486, - "all_grouped_npm": 0.5858385637477572, + "all_grouped_average": 0.7440765531394145, + "all_grouped_npm": 0.6141390853901172, "all_grouped": { "enem_challenge": 0.7718684394681595, "bluex": 0.717663421418637, @@ -45,7 +45,7 @@ "faquad_nli": 0.7928832962550982, "hatebr_offensive": 0.8489571799283399, "portuguese_hate_speech": 0.6461388323150035, - "tweetsentbr": 0.5134846646789802 + "tweetsentbr": 0.6846462195719735 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.7718684394681595, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7928832962550982, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8489571799283399, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6461388323150035, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5134846646789802 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6846462195719735 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.7718684394681595, @@ -150,9 +150,9 @@ "main_score": 0.6461388323150035 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5134846646789802, + "f1_macro,all": 0.6846462195719735, "acc,all": 0.7417910447761195, - "main_score": 0.5134846646789802 + "main_score": 0.6846462195719735 } }, "config_tasks": { diff --git 
a/Qwen/Qwen1.5-32B/raw_2024-04-13T19-30-56.827110/results.json b/Qwen/Qwen1.5-32B/raw_2024-04-13T19-30-56.827110/results.json index dd7e36b8a02f8eb51ed9f40b199beba63a89b824..64a67bcbe1f99540ef27e365f7f65d6914a6a7a2 100644 --- a/Qwen/Qwen1.5-32B/raw_2024-04-13T19-30-56.827110/results.json +++ b/Qwen/Qwen1.5-32B/raw_2024-04-13T19-30-56.827110/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9280834059429739, - "acc,all": 0.9281045751633987, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.5301047372239611, - "mse,all": 2.1142238562091507, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.541029207232267, - "acc,exam_id__UNICAMP_2018": 0.5, - "acc,exam_id__USP_2019": 0.575, - "acc,exam_id__UNICAMP_2021_2": 0.47058823529411764, - "acc,exam_id__UNICAMP_2022": 0.6666666666666666, - "acc,exam_id__USP_2021": 0.4423076923076923, - "acc,exam_id__UNICAMP_2024": 0.5333333333333333, - "acc,exam_id__USP_2022": 0.4897959183673469, - "acc,exam_id__UNICAMP_2020": 0.5272727272727272, - "acc,exam_id__USP_2024": 0.6829268292682927, - "acc,exam_id__USP_2018": 0.3888888888888889, - "acc,exam_id__USP_2023": 0.7045454545454546, - "acc,exam_id__UNICAMP_2023": 0.5813953488372093, - "acc,exam_id__UNICAMP_2021_1": 0.5, - "acc,exam_id__UNICAMP_2019": 0.62, - "acc,exam_id__USP_2020": 0.5357142857142857, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6571028691392582, - "acc,exam_id__2009": 0.6434782608695652, - "acc,exam_id__2011": 0.7264957264957265, - "acc,exam_id__2010": 0.6324786324786325, - "acc,exam_id__2014": 0.6880733944954128, - "acc,exam_id__2016": 0.6446280991735537, - "acc,exam_id__2016_2": 0.6666666666666666, - "acc,exam_id__2013": 0.6851851851851852, - "acc,exam_id__2023": 0.7037037037037037, - "acc,exam_id__2022": 0.5939849624060151, - "acc,exam_id__2012": 0.6637931034482759, - "acc,exam_id__2017": 0.5862068965517241, - "acc,exam_id__2015": 0.6554621848739496 - }, - "faquad_nli": { - "f1_macro,all": 0.7256362507759155, - "acc,all": 0.7646153846153846, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7760546898020694, - "acc,all": 0.7828571428571428 - }, - "oab_exams": { - "acc,all": 0.40728929384965834, - "acc,exam_id__2016-21": 0.4, - "acc,exam_id__2011-05": 0.4375, - "acc,exam_id__2012-06": 0.425, - "acc,exam_id__2011-03": 0.41414141414141414, - "acc,exam_id__2016-20a": 0.4125, - "acc,exam_id__2017-22": 0.4875, - "acc,exam_id__2014-14": 0.4625, - "acc,exam_id__2012-09": 0.3246753246753247, - "acc,exam_id__2014-13": 0.375, - "acc,exam_id__2018-25": 0.45, - "acc,exam_id__2014-15": 0.4358974358974359, - "acc,exam_id__2012-08": 0.3625, - "acc,exam_id__2016-19": 0.4358974358974359, - "acc,exam_id__2017-23": 0.4625, - "acc,exam_id__2010-01": 0.29411764705882354, - "acc,exam_id__2016-20": 0.425, - "acc,exam_id__2013-12": 0.4, - "acc,exam_id__2011-04": 0.3375, - "acc,exam_id__2015-17": 0.4358974358974359, - "acc,exam_id__2013-11": 0.425, - "acc,exam_id__2012-06a": 0.375, - "acc,exam_id__2010-02": 0.43, - "acc,exam_id__2012-07": 0.325, - "acc,exam_id__2015-18": 0.4625, - "acc,exam_id__2015-16": 0.3875, - "acc,exam_id__2017-24": 0.4375, - "acc,exam_id__2013-10": 0.375, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7047723286051433, - "acc,all": 0.7497062279670975 - }, - "tweetsentbr": { - "f1_macro,all": 0.388941564839531, - "acc,all": 0.6502487562189054, - "alias": "tweetsentbr" 
- } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9280834059429739, + "acc,all": 0.9281045751633987, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.5301047372239611, + "mse,all": 2.1142238562091507, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.541029207232267, + "acc,exam_id__UNICAMP_2018": 0.5, + "acc,exam_id__USP_2019": 0.575, + "acc,exam_id__UNICAMP_2021_2": 0.47058823529411764, + "acc,exam_id__UNICAMP_2022": 0.6666666666666666, + "acc,exam_id__USP_2021": 0.4423076923076923, + "acc,exam_id__UNICAMP_2024": 0.5333333333333333, + "acc,exam_id__USP_2022": 0.4897959183673469, + "acc,exam_id__UNICAMP_2020": 0.5272727272727272, + "acc,exam_id__USP_2024": 0.6829268292682927, + "acc,exam_id__USP_2018": 0.3888888888888889, + "acc,exam_id__USP_2023": 0.7045454545454546, + "acc,exam_id__UNICAMP_2023": 0.5813953488372093, + "acc,exam_id__UNICAMP_2021_1": 0.5, + "acc,exam_id__UNICAMP_2019": 0.62, + "acc,exam_id__USP_2020": 0.5357142857142857, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6571028691392582, + "acc,exam_id__2009": 0.6434782608695652, + "acc,exam_id__2011": 0.7264957264957265, + "acc,exam_id__2010": 0.6324786324786325, + "acc,exam_id__2014": 0.6880733944954128, + "acc,exam_id__2016": 0.6446280991735537, + "acc,exam_id__2016_2": 0.6666666666666666, + "acc,exam_id__2013": 0.6851851851851852, + "acc,exam_id__2023": 0.7037037037037037, + "acc,exam_id__2022": 0.5939849624060151, + "acc,exam_id__2012": 0.6637931034482759, + "acc,exam_id__2017": 0.5862068965517241, + "acc,exam_id__2015": 0.6554621848739496 + }, + "faquad_nli": { + "f1_macro,all": 0.7256362507759155, + "acc,all": 0.7646153846153846, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7760546898020694, + "acc,all": 0.7828571428571428 + }, + "oab_exams": { + "acc,all": 0.40728929384965834, + "acc,exam_id__2016-21": 0.4, + "acc,exam_id__2011-05": 0.4375, + "acc,exam_id__2012-06": 0.425, + "acc,exam_id__2011-03": 0.41414141414141414, + "acc,exam_id__2016-20a": 0.4125, + "acc,exam_id__2017-22": 0.4875, + "acc,exam_id__2014-14": 0.4625, + "acc,exam_id__2012-09": 0.3246753246753247, + "acc,exam_id__2014-13": 0.375, + 
"acc,exam_id__2018-25": 0.45, + "acc,exam_id__2014-15": 0.4358974358974359, + "acc,exam_id__2012-08": 0.3625, + "acc,exam_id__2016-19": 0.4358974358974359, + "acc,exam_id__2017-23": 0.4625, + "acc,exam_id__2010-01": 0.29411764705882354, + "acc,exam_id__2016-20": 0.425, + "acc,exam_id__2013-12": 0.4, + "acc,exam_id__2011-04": 0.3375, + "acc,exam_id__2015-17": 0.4358974358974359, + "acc,exam_id__2013-11": 0.425, + "acc,exam_id__2012-06a": 0.375, + "acc,exam_id__2010-02": 0.43, + "acc,exam_id__2012-07": 0.325, + "acc,exam_id__2015-18": 0.4625, + "acc,exam_id__2015-16": 0.3875, + "acc,exam_id__2017-24": 0.4375, + "acc,exam_id__2013-10": 0.375, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7047723286051433, + "acc,all": 0.7497062279670975 + }, + "tweetsentbr": { + "f1_macro,all": 0.5185887531193748, + "acc,all": 0.6502487562189054, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": 
"find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - 
"regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. 
Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"cefef80dc06a65f89d1d71d0adbc56d335ca2490", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 66098194432, - "model_num_parameters": 32512218112, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 2, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1307.2818627450981, - "min_seq_length": 1288, - "max_seq_length": 1369, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1498.2818627450981, - "min_seq_length": 1479, - "max_seq_length": 1560, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1495.2378303198886, - "min_seq_length": 1175, - "max_seq_length": 2146, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1415.7809657102869, - "min_seq_length": 1185, - "max_seq_length": 2376, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1438.6876923076923, - "min_seq_length": 1394, - "max_seq_length": 1534, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1245.6992857142857, - "min_seq_length": 1227, - "max_seq_length": 1466, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "cefef80dc06a65f89d1d71d0adbc56d335ca2490", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 66098194432, + "model_num_parameters": 32512218112, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": 
null, + "model_device": "cuda:0", + "batch_size": 2, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1203.888382687927, - "min_seq_length": 974, - "max_seq_length": 1617, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1307.2818627450981, + "min_seq_length": 1288, + "max_seq_length": 1369, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1498.2818627450981, + "min_seq_length": 1479, + "max_seq_length": 1560, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1495.2378303198886, + "min_seq_length": 1175, + "max_seq_length": 2146, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1415.7809657102869, + "min_seq_length": 1185, + "max_seq_length": 2376, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1438.6876923076923, + "min_seq_length": 1394, + "max_seq_length": 1534, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1245.6992857142857, + "min_seq_length": 1227, + "max_seq_length": 1466, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1203.888382687927, + "min_seq_length": 974, + "max_seq_length": 1617, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1688.62044653349, + "min_seq_length": 1658, + "max_seq_length": 1722, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 
2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1539.0567164179104, + "min_seq_length": 1521, + "max_seq_length": 1587, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1688.62044653349, - "min_seq_length": 1658, - "max_seq_length": 1722, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Qwen/Qwen1.5-32B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1539.0567164179104, - "min_seq_length": 1521, - "max_seq_length": 1587, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Qwen/Qwen1.5-32B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "1158bba" + "git_hash": "1158bba" } \ No newline at end of file diff --git a/Qwen/Qwen1.5-32B/results_2024-04-13T19-30-56.827110.json b/Qwen/Qwen1.5-32B/results_2024-04-13T19-30-56.827110.json index b1ba4387c853940f5b0a308e031cfd72b03719a8..55b065d5b02ffb29e180801bf8f7afbd0f02ff4c 100644 --- a/Qwen/Qwen1.5-32B/results_2024-04-13T19-30-56.827110.json +++ b/Qwen/Qwen1.5-32B/results_2024-04-13T19-30-56.827110.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6287793719345308, - "all_grouped_npm": 0.46077156908964045, + "all_grouped_average": 0.6431846150767357, + "all_grouped_npm": 0.4822079428131596, "all_grouped": { "enem_challenge": 0.6571028691392582, "bluex": 0.541029207232267, @@ -45,7 +45,7 @@ "faquad_nli": 0.7256362507759155, "hatebr_offensive": 0.7760546898020694, "portuguese_hate_speech": 0.7047723286051433, - "tweetsentbr": 0.388941564839531 + "tweetsentbr": 0.5185887531193748 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6571028691392582, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7256362507759155, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7760546898020694, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7047723286051433, - "harness|tweetsentbr|tweetsentbr|None|25": 0.388941564839531 + "harness|tweetsentbr|tweetsentbr|None|25": 0.5185887531193748 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6571028691392582, @@ -150,9 +150,9 @@ "main_score": 0.7047723286051433 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.388941564839531, + "f1_macro,all": 0.5185887531193748, "acc,all": 0.6502487562189054, - "main_score": 
0.388941564839531 + "main_score": 0.5185887531193748 } }, "config_tasks": { diff --git a/Qwen/Qwen2-0.5B/raw_2024-06-12T06-46-45.592981/results.json b/Qwen/Qwen2-0.5B/raw_2024-06-12T06-46-45.592981/results.json index 742690f24d2bc492d778bf950dc4262f4f616c9b..3688d8ad443cf7ed8676ad35e4a66c42a22001bf 100644 --- a/Qwen/Qwen2-0.5B/raw_2024-06-12T06-46-45.592981/results.json +++ b/Qwen/Qwen2-0.5B/raw_2024-06-12T06-46-45.592981/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.5874337950921603, - "acc,all": 0.6074346405228758, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.02509703564375903, - "mse,all": 2.315557859477124, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.22531293463143254, - "acc,exam_id__USP_2020": 0.26785714285714285, - "acc,exam_id__USP_2021": 0.21153846153846154, - "acc,exam_id__USP_2018": 0.037037037037037035, - "acc,exam_id__USP_2022": 0.12244897959183673, - "acc,exam_id__UNICAMP_2021_2": 0.27450980392156865, - "acc,exam_id__USP_2023": 0.13636363636363635, - "acc,exam_id__USP_2019": 0.25, - "acc,exam_id__UNICAMP_2018": 0.35185185185185186, - "acc,exam_id__USP_2024": 0.17073170731707318, - "acc,exam_id__UNICAMP_2020": 0.23636363636363636, - "acc,exam_id__UNICAMP_2021_1": 0.15217391304347827, - "acc,exam_id__UNICAMP_2022": 0.2564102564102564, - "acc,exam_id__UNICAMP_2024": 0.3333333333333333, - "acc,exam_id__UNICAMP_2023": 0.27906976744186046, - "acc,exam_id__UNICAMP_2019": 0.3, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.1980405878236529, - "acc,exam_id__2009": 0.14782608695652175, - "acc,exam_id__2016": 0.21487603305785125, - "acc,exam_id__2017": 0.20689655172413793, - "acc,exam_id__2010": 0.23076923076923078, - "acc,exam_id__2011": 0.1794871794871795, - "acc,exam_id__2022": 0.20300751879699247, - "acc,exam_id__2012": 0.20689655172413793, - "acc,exam_id__2013": 0.25, - "acc,exam_id__2015": 0.20168067226890757, - "acc,exam_id__2016_2": 0.17886178861788618, - "acc,exam_id__2023": 0.17037037037037037, - "acc,exam_id__2014": 0.1926605504587156 - }, - "faquad_nli": { - "f1_macro,all": 0.29245826137017844, - "acc,all": 0.7815384615384615, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.4634868211245766, - "acc,all": 0.48928571428571427 - }, - "oab_exams": { - "acc,all": 0.269248291571754, - "acc,exam_id__2010-02": 0.21, - "acc,exam_id__2013-10": 0.25, - "acc,exam_id__2010-01": 0.2235294117647059, - "acc,exam_id__2015-17": 0.28205128205128205, - "acc,exam_id__2016-19": 0.3333333333333333, - "acc,exam_id__2017-22": 0.3, - "acc,exam_id__2017-23": 0.2875, - "acc,exam_id__2012-06a": 0.2625, - "acc,exam_id__2012-08": 0.25, - "acc,exam_id__2011-04": 0.25, - "acc,exam_id__2015-18": 0.2875, - "acc,exam_id__2017-24": 0.275, - "acc,exam_id__2013-11": 0.3125, - "acc,exam_id__2014-13": 0.3, - "acc,exam_id__2015-16": 0.225, - "acc,exam_id__2013-12": 0.275, - "acc,exam_id__2012-09": 0.2857142857142857, - "acc,exam_id__2011-03": 0.25252525252525254, - "acc,exam_id__2012-06": 0.2375, - "acc,exam_id__2011-05": 0.225, - "acc,exam_id__2016-20a": 0.2125, - "acc,exam_id__2014-14": 0.2375, - "acc,exam_id__2014-15": 0.34615384615384615, - "acc,exam_id__2018-25": 0.2875, - "acc,exam_id__2016-20": 0.3, - "acc,exam_id__2012-07": 0.3125, - "acc,exam_id__2016-21": 0.275, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.16717374202659538, - "acc,all": 0.30669800235017625 - 
}, - "tweetsentbr": { - "f1_macro,all": 0.2542010308555615, - "acc,all": 0.31094527363184077, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.5874337950921603, + "acc,all": 0.6074346405228758, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.02509703564375903, + "mse,all": 2.315557859477124, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.22531293463143254, + "acc,exam_id__USP_2020": 0.26785714285714285, + "acc,exam_id__USP_2021": 0.21153846153846154, + "acc,exam_id__USP_2018": 0.037037037037037035, + "acc,exam_id__USP_2022": 0.12244897959183673, + "acc,exam_id__UNICAMP_2021_2": 0.27450980392156865, + "acc,exam_id__USP_2023": 0.13636363636363635, + "acc,exam_id__USP_2019": 0.25, + "acc,exam_id__UNICAMP_2018": 0.35185185185185186, + "acc,exam_id__USP_2024": 0.17073170731707318, + "acc,exam_id__UNICAMP_2020": 0.23636363636363636, + "acc,exam_id__UNICAMP_2021_1": 0.15217391304347827, + "acc,exam_id__UNICAMP_2022": 0.2564102564102564, + "acc,exam_id__UNICAMP_2024": 0.3333333333333333, + "acc,exam_id__UNICAMP_2023": 0.27906976744186046, + "acc,exam_id__UNICAMP_2019": 0.3, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.1980405878236529, + "acc,exam_id__2009": 0.14782608695652175, + "acc,exam_id__2016": 0.21487603305785125, + "acc,exam_id__2017": 0.20689655172413793, + "acc,exam_id__2010": 0.23076923076923078, + "acc,exam_id__2011": 0.1794871794871795, + "acc,exam_id__2022": 0.20300751879699247, + "acc,exam_id__2012": 0.20689655172413793, + "acc,exam_id__2013": 0.25, + "acc,exam_id__2015": 0.20168067226890757, + "acc,exam_id__2016_2": 0.17886178861788618, + "acc,exam_id__2023": 0.17037037037037037, + "acc,exam_id__2014": 0.1926605504587156 + }, + "faquad_nli": { + "f1_macro,all": 0.43868739205526774, + "acc,all": 0.7815384615384615, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.4634868211245766, + "acc,all": 0.48928571428571427 + }, + "oab_exams": { + "acc,all": 0.269248291571754, + "acc,exam_id__2010-02": 0.21, + "acc,exam_id__2013-10": 0.25, + "acc,exam_id__2010-01": 0.2235294117647059, + "acc,exam_id__2015-17": 0.28205128205128205, + 
"acc,exam_id__2016-19": 0.3333333333333333, + "acc,exam_id__2017-22": 0.3, + "acc,exam_id__2017-23": 0.2875, + "acc,exam_id__2012-06a": 0.2625, + "acc,exam_id__2012-08": 0.25, + "acc,exam_id__2011-04": 0.25, + "acc,exam_id__2015-18": 0.2875, + "acc,exam_id__2017-24": 0.275, + "acc,exam_id__2013-11": 0.3125, + "acc,exam_id__2014-13": 0.3, + "acc,exam_id__2015-16": 0.225, + "acc,exam_id__2013-12": 0.275, + "acc,exam_id__2012-09": 0.2857142857142857, + "acc,exam_id__2011-03": 0.25252525252525254, + "acc,exam_id__2012-06": 0.2375, + "acc,exam_id__2011-05": 0.225, + "acc,exam_id__2016-20a": 0.2125, + "acc,exam_id__2014-14": 0.2375, + "acc,exam_id__2014-15": 0.34615384615384615, + "acc,exam_id__2018-25": 0.2875, + "acc,exam_id__2016-20": 0.3, + "acc,exam_id__2012-07": 0.3125, + "acc,exam_id__2016-21": 0.275, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.2507606130398931, + "acc,all": 0.30669800235017625 + }, + "tweetsentbr": { + "f1_macro,all": 0.2542010308555615, + "acc,all": 0.31094527363184077, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "ff3a49fac17555b8dfc4db6709f480cc8f16a9fe", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 1793374976, - "model_num_parameters": 494032768, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1307.2818627450981, - "min_seq_length": 1288, - "max_seq_length": 1369, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1498.2818627450981, - "min_seq_length": 1479, - "max_seq_length": 1560, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1495.2378303198886, - "min_seq_length": 1175, - "max_seq_length": 2146, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1415.7809657102869, - "min_seq_length": 1185, - "max_seq_length": 2376, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1438.6876923076923, - "min_seq_length": 1394, - "max_seq_length": 1534, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "ff3a49fac17555b8dfc4db6709f480cc8f16a9fe", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 1793374976, + "model_num_parameters": 494032768, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1245.6992857142857, - "min_seq_length": 1227, - "max_seq_length": 1466, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1203.888382687927, - "min_seq_length": 974, - "max_seq_length": 1617, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1307.2818627450981, + "min_seq_length": 1288, + "max_seq_length": 1369, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1498.2818627450981, + "min_seq_length": 1479, + "max_seq_length": 1560, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1495.2378303198886, + "min_seq_length": 1175, + "max_seq_length": 2146, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1415.7809657102869, + "min_seq_length": 1185, + "max_seq_length": 2376, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1438.6876923076923, + "min_seq_length": 1394, + "max_seq_length": 1534, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1245.6992857142857, + "min_seq_length": 1227, + "max_seq_length": 1466, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1203.888382687927, + "min_seq_length": 974, + "max_seq_length": 1617, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1688.62044653349, + "min_seq_length": 1658, + "max_seq_length": 1722, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1539.0567164179104, + "min_seq_length": 1521, + "max_seq_length": 1587, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1688.62044653349, - "min_seq_length": 1658, - "max_seq_length": 1722, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Qwen/Qwen2-0.5B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1539.0567164179104, - "min_seq_length": 1521, - "max_seq_length": 1587, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Qwen/Qwen2-0.5B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "f2a0116" + "git_hash": 
"f2a0116" } \ No newline at end of file diff --git a/Qwen/Qwen2-0.5B/results_2024-06-12T06-46-45.592981.json b/Qwen/Qwen2-0.5B/results_2024-06-12T06-46-45.592981.json index a6869702ea8c697a1c8e2c2febbcb943d890b7da..703cb6f09ee177dca7eeb35744f761a2b1e18227 100644 --- a/Qwen/Qwen2-0.5B/results_2024-06-12T06-46-45.592981.json +++ b/Qwen/Qwen2-0.5B/results_2024-06-12T06-46-45.592981.json @@ -34,17 +34,17 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.2758280555710745, - "all_grouped_npm": -0.09537842254161105, + "all_grouped_average": 0.3013631668708953, + "all_grouped_npm": -0.04768520009116168, "all_grouped": { "enem_challenge": 0.1980405878236529, "bluex": 0.22531293463143254, "oab_exams": 0.269248291571754, "assin2_rte": 0.5874337950921603, "assin2_sts": 0.02509703564375903, - "faquad_nli": 0.29245826137017844, + "faquad_nli": 0.43868739205526774, "hatebr_offensive": 0.4634868211245766, - "portuguese_hate_speech": 0.16717374202659538, + "portuguese_hate_speech": 0.2507606130398931, "tweetsentbr": 0.2542010308555615 }, "all": { @@ -53,9 +53,9 @@ "harness|oab_exams|oab_exams|None|3": 0.269248291571754, "harness|assin2_rte|assin2_rte|None|15": 0.5874337950921603, "harness|assin2_sts|assin2_sts|None|15": 0.02509703564375903, - "harness|faquad_nli|faquad_nli|None|15": 0.29245826137017844, + "harness|faquad_nli|faquad_nli|None|15": 0.43868739205526774, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.4634868211245766, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.16717374202659538, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.2507606130398931, "harness|tweetsentbr|tweetsentbr|None|25": 0.2542010308555615 }, "harness|enem_challenge|enem_challenge|None|3": { @@ -135,9 +135,9 @@ "main_score": 0.02509703564375903 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.29245826137017844, + "f1_macro,all": 0.43868739205526774, "acc,all": 0.7815384615384615, - "main_score": 0.29245826137017844 + "main_score": 0.43868739205526774 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { "f1_macro,all": 0.4634868211245766, @@ -145,9 +145,9 @@ "main_score": 0.4634868211245766 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.16717374202659538, + "f1_macro,all": 0.2507606130398931, "acc,all": 0.30669800235017625, - "main_score": 0.16717374202659538 + "main_score": 0.2507606130398931 }, "harness|tweetsentbr|tweetsentbr|None|25": { "f1_macro,all": 0.2542010308555615, diff --git a/RLHFlow/LLaMA3-iterative-DPO-final/raw_2024-06-12T18-34-30.568502/results.json b/RLHFlow/LLaMA3-iterative-DPO-final/raw_2024-06-12T18-34-30.568502/results.json index ab281e64e6b766866583918498a9aa8027ca61e4..d8535ec9f479eb2de7ea9c725af572eed0c90506 100644 --- a/RLHFlow/LLaMA3-iterative-DPO-final/raw_2024-06-12T18-34-30.568502/results.json +++ b/RLHFlow/LLaMA3-iterative-DPO-final/raw_2024-06-12T18-34-30.568502/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.92278946649082, - "acc,all": 0.9227941176470589, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7087993617619036, - "mse,all": 0.8367942776838234, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5799721835883171, - "acc,exam_id__UNICAMP_2020": 0.5636363636363636, - "acc,exam_id__UNICAMP_2021_1": 0.5869565217391305, - "acc,exam_id__USP_2019": 0.6, - "acc,exam_id__USP_2018": 0.5185185185185185, - "acc,exam_id__USP_2023": 0.6818181818181818, - "acc,exam_id__USP_2020": 0.6071428571428571, - 
"acc,exam_id__UNICAMP_2018": 0.5370370370370371, - "acc,exam_id__UNICAMP_2023": 0.5581395348837209, - "acc,exam_id__USP_2022": 0.6122448979591837, - "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, - "acc,exam_id__UNICAMP_2019": 0.56, - "acc,exam_id__USP_2024": 0.6829268292682927, - "acc,exam_id__UNICAMP_2022": 0.5384615384615384, - "acc,exam_id__UNICAMP_2024": 0.5777777777777777, - "acc,exam_id__USP_2021": 0.5384615384615384, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6738978306508048, - "acc,exam_id__2017": 0.6551724137931034, - "acc,exam_id__2013": 0.6851851851851852, - "acc,exam_id__2022": 0.6691729323308271, - "acc,exam_id__2011": 0.7008547008547008, - "acc,exam_id__2009": 0.6782608695652174, - "acc,exam_id__2015": 0.6470588235294118, - "acc,exam_id__2016_2": 0.7073170731707317, - "acc,exam_id__2023": 0.7333333333333333, - "acc,exam_id__2014": 0.6513761467889908, - "acc,exam_id__2016": 0.628099173553719, - "acc,exam_id__2012": 0.646551724137931, - "acc,exam_id__2010": 0.6752136752136753 - }, - "faquad_nli": { - "f1_macro,all": 0.6838748255929838, - "acc,all": 0.7184615384615385, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.5609080813037527, - "acc,all": 0.8428571428571429 - }, - "oab_exams": { - "acc,all": 0.46788154897494305, - "acc,exam_id__2011-03": 0.42424242424242425, - "acc,exam_id__2011-05": 0.4125, - "acc,exam_id__2011-04": 0.4125, - "acc,exam_id__2017-22": 0.575, - "acc,exam_id__2013-11": 0.5, - "acc,exam_id__2010-01": 0.4235294117647059, - "acc,exam_id__2012-06a": 0.4625, - "acc,exam_id__2013-10": 0.5, - "acc,exam_id__2012-07": 0.5125, - "acc,exam_id__2013-12": 0.5625, - "acc,exam_id__2017-23": 0.425, - "acc,exam_id__2018-25": 0.4375, - "acc,exam_id__2014-14": 0.5625, - "acc,exam_id__2016-20a": 0.425, - "acc,exam_id__2012-09": 0.37662337662337664, - "acc,exam_id__2015-16": 0.375, - "acc,exam_id__2017-24": 0.4125, - "acc,exam_id__2014-13": 0.4, - "acc,exam_id__2014-15": 0.6410256410256411, - "acc,exam_id__2016-21": 0.4375, - "acc,exam_id__2015-18": 0.4375, - "acc,exam_id__2012-08": 0.425, - "acc,exam_id__2010-02": 0.45, - "acc,exam_id__2012-06": 0.5125, - "acc,exam_id__2015-17": 0.5897435897435898, - "acc,exam_id__2016-20": 0.5, - "acc,exam_id__2016-19": 0.46153846153846156, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.44299611501372915, - "acc,all": 0.6721504112808461 - }, - "tweetsentbr": { - "f1_macro,all": 0.49680906213995335, - "acc,all": 0.7139303482587065, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.92278946649082, + "acc,all": 0.9227941176470589, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7087993617619036, + "mse,all": 0.8367942776838234, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5799721835883171, + "acc,exam_id__UNICAMP_2020": 0.5636363636363636, + "acc,exam_id__UNICAMP_2021_1": 0.5869565217391305, + "acc,exam_id__USP_2019": 0.6, + "acc,exam_id__USP_2018": 0.5185185185185185, + "acc,exam_id__USP_2023": 0.6818181818181818, + "acc,exam_id__USP_2020": 0.6071428571428571, + "acc,exam_id__UNICAMP_2018": 0.5370370370370371, + "acc,exam_id__UNICAMP_2023": 0.5581395348837209, + "acc,exam_id__USP_2022": 0.6122448979591837, + "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, + "acc,exam_id__UNICAMP_2019": 0.56, + "acc,exam_id__USP_2024": 0.6829268292682927, + "acc,exam_id__UNICAMP_2022": 0.5384615384615384, + "acc,exam_id__UNICAMP_2024": 0.5777777777777777, + "acc,exam_id__USP_2021": 0.5384615384615384, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6738978306508048, + "acc,exam_id__2017": 0.6551724137931034, + "acc,exam_id__2013": 0.6851851851851852, + "acc,exam_id__2022": 0.6691729323308271, + "acc,exam_id__2011": 0.7008547008547008, + "acc,exam_id__2009": 0.6782608695652174, + "acc,exam_id__2015": 0.6470588235294118, + "acc,exam_id__2016_2": 0.7073170731707317, + "acc,exam_id__2023": 0.7333333333333333, + "acc,exam_id__2014": 0.6513761467889908, + "acc,exam_id__2016": 0.628099173553719, + "acc,exam_id__2012": 0.646551724137931, + "acc,exam_id__2010": 0.6752136752136753 + }, + "faquad_nli": { + "f1_macro,all": 0.6838748255929838, + "acc,all": 0.7184615384615385, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.841362121955629, + "acc,all": 0.8428571428571429 + }, + "oab_exams": { + "acc,all": 0.46788154897494305, + "acc,exam_id__2011-03": 0.42424242424242425, + "acc,exam_id__2011-05": 0.4125, + "acc,exam_id__2011-04": 0.4125, + "acc,exam_id__2017-22": 0.575, + "acc,exam_id__2013-11": 0.5, + "acc,exam_id__2010-01": 0.4235294117647059, + "acc,exam_id__2012-06a": 0.4625, + "acc,exam_id__2013-10": 0.5, + "acc,exam_id__2012-07": 0.5125, + "acc,exam_id__2013-12": 0.5625, + "acc,exam_id__2017-23": 0.425, + "acc,exam_id__2018-25": 0.4375, + "acc,exam_id__2014-14": 0.5625, + "acc,exam_id__2016-20a": 0.425, + "acc,exam_id__2012-09": 0.37662337662337664, + "acc,exam_id__2015-16": 0.375, + "acc,exam_id__2017-24": 0.4125, + "acc,exam_id__2014-13": 0.4, + "acc,exam_id__2014-15": 0.6410256410256411, + "acc,exam_id__2016-21": 0.4375, + "acc,exam_id__2015-18": 0.4375, + 
"acc,exam_id__2012-08": 0.425, + "acc,exam_id__2010-02": 0.45, + "acc,exam_id__2012-06": 0.5125, + "acc,exam_id__2015-17": 0.5897435897435898, + "acc,exam_id__2016-20": 0.5, + "acc,exam_id__2016-19": 0.46153846153846156, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6644941725205937, + "acc,all": 0.6721504112808461 + }, + "tweetsentbr": { + "f1_macro,all": 0.6624120828532712, + "acc,all": 0.7139303482587065, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "360547e1086bbb02acd5f5f21e0fa256599f3b34", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 16194748416, - "model_num_parameters": 8030261248, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 2, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1349.5322712418301, - "min_seq_length": 1330, - "max_seq_length": 1413, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1540.5322712418301, - "min_seq_length": 1521, - "max_seq_length": 1604, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1491.7719054242002, - "min_seq_length": 1172, - "max_seq_length": 2141, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1419.3547935619315, - "min_seq_length": 1194, - "max_seq_length": 2347, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1478.8215384615385, - "min_seq_length": 1433, - "max_seq_length": 1575, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "360547e1086bbb02acd5f5f21e0fa256599f3b34", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 16194748416, + "model_num_parameters": 8030261248, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 2, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1330.3878571428572, - "min_seq_length": 1310, - "max_seq_length": 1549, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1227.3772209567198, - "min_seq_length": 995, - "max_seq_length": 1661, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1349.5322712418301, + "min_seq_length": 1330, + "max_seq_length": 1413, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1540.5322712418301, + "min_seq_length": 1521, + "max_seq_length": 1604, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1491.7719054242002, + "min_seq_length": 1172, + "max_seq_length": 2141, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1419.3547935619315, + "min_seq_length": 1194, + "max_seq_length": 2347, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1478.8215384615385, + "min_seq_length": 1433, + "max_seq_length": 1575, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1330.3878571428572, + "min_seq_length": 1310, + "max_seq_length": 1549, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1227.3772209567198, + "min_seq_length": 995, + "max_seq_length": 1661, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1727.4195064629848, + "min_seq_length": 1697, + "max_seq_length": 1759, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1588.1537313432837, + "min_seq_length": 1571, + "max_seq_length": 1636, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1727.4195064629848, - "min_seq_length": 1697, - "max_seq_length": 1759, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=RLHFlow/LLaMA3-iterative-DPO-final,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1588.1537313432837, - "min_seq_length": 1571, - "max_seq_length": 1636, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=RLHFlow/LLaMA3-iterative-DPO-final,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - 
"git_hash": "f2a0116" + "git_hash": "f2a0116" } \ No newline at end of file diff --git a/RLHFlow/LLaMA3-iterative-DPO-final/results_2024-06-12T18-34-30.568502.json b/RLHFlow/LLaMA3-iterative-DPO-final/results_2024-06-12T18-34-30.568502.json index 6c1366f220d75737167997b5338bacaa2c02d5c5..185d104b7fa6e507c57cba7f2558aa180bda3f16 100644 --- a/RLHFlow/LLaMA3-iterative-DPO-final/results_2024-06-12T18-34-30.568502.json +++ b/RLHFlow/LLaMA3-iterative-DPO-final/results_2024-06-12T18-34-30.568502.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6153253861685787, - "all_grouped_npm": 0.4020100294129658, + "all_grouped_average": 0.689498177154363, + "all_grouped_npm": 0.5389524042332364, "all_grouped": { "enem_challenge": 0.6738978306508048, "bluex": 0.5799721835883171, @@ -43,9 +43,9 @@ "assin2_rte": 0.92278946649082, "assin2_sts": 0.7087993617619036, "faquad_nli": 0.6838748255929838, - "hatebr_offensive": 0.5609080813037527, - "portuguese_hate_speech": 0.44299611501372915, - "tweetsentbr": 0.49680906213995335 + "hatebr_offensive": 0.841362121955629, + "portuguese_hate_speech": 0.6644941725205937, + "tweetsentbr": 0.6624120828532712 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6738978306508048, @@ -54,9 +54,9 @@ "harness|assin2_rte|assin2_rte|None|15": 0.92278946649082, "harness|assin2_sts|assin2_sts|None|15": 0.7087993617619036, "harness|faquad_nli|faquad_nli|None|15": 0.6838748255929838, - "harness|hatebr_offensive|hatebr_offensive|None|25": 0.5609080813037527, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.44299611501372915, - "harness|tweetsentbr|tweetsentbr|None|25": 0.49680906213995335 + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.841362121955629, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6644941725205937, + "harness|tweetsentbr|tweetsentbr|None|25": 0.6624120828532712 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6738978306508048, @@ -140,19 +140,19 @@ "main_score": 0.6838748255929838 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.5609080813037527, + "f1_macro,all": 0.841362121955629, "acc,all": 0.8428571428571429, - "main_score": 0.5609080813037527 + "main_score": 0.841362121955629 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.44299611501372915, + "f1_macro,all": 0.6644941725205937, "acc,all": 0.6721504112808461, - "main_score": 0.44299611501372915 + "main_score": 0.6644941725205937 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.49680906213995335, + "f1_macro,all": 0.6624120828532712, "acc,all": 0.7139303482587065, - "main_score": 0.49680906213995335 + "main_score": 0.6624120828532712 } }, "config_tasks": { diff --git a/Ramikan-BR/tinyllama-coder-py-4bit-v10/raw_2024-05-29T13-03-10.327337/results.json b/Ramikan-BR/tinyllama-coder-py-4bit-v10/raw_2024-05-29T13-03-10.327337/results.json index 6e6a2f341e4dbc47ef9c7c9aafc227ee0562d2fc..50f0f4bd97c4bee0de8bc4d7b315385503441682 100644 --- a/Ramikan-BR/tinyllama-coder-py-4bit-v10/raw_2024-05-29T13-03-10.327337/results.json +++ b/Ramikan-BR/tinyllama-coder-py-4bit-v10/raw_2024-05-29T13-03-10.327337/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.4105763517528223, - "acc,all": 0.4934640522875817, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.09183354777296178, - "mse,all": 2.153018790849673, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 
0.18636995827538247, - "acc,exam_id__UNICAMP_2018": 0.2037037037037037, - "acc,exam_id__UNICAMP_2023": 0.3023255813953488, - "acc,exam_id__USP_2023": 0.11363636363636363, - "acc,exam_id__UNICAMP_2024": 0.17777777777777778, - "acc,exam_id__USP_2024": 0.14634146341463414, - "acc,exam_id__UNICAMP_2021_1": 0.2826086956521739, - "acc,exam_id__USP_2020": 0.19642857142857142, - "acc,exam_id__UNICAMP_2020": 0.16363636363636364, - "acc,exam_id__UNICAMP_2022": 0.20512820512820512, - "acc,exam_id__UNICAMP_2019": 0.16, - "acc,exam_id__UNICAMP_2021_2": 0.13725490196078433, - "acc,exam_id__USP_2018": 0.1111111111111111, - "acc,exam_id__USP_2021": 0.15384615384615385, - "acc,exam_id__USP_2019": 0.275, - "acc,exam_id__USP_2022": 0.20408163265306123, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.1847445766270119, - "acc,exam_id__2011": 0.1623931623931624, - "acc,exam_id__2017": 0.21551724137931033, - "acc,exam_id__2015": 0.13445378151260504, - "acc,exam_id__2016": 0.15702479338842976, - "acc,exam_id__2016_2": 0.1951219512195122, - "acc,exam_id__2009": 0.20869565217391303, - "acc,exam_id__2012": 0.2413793103448276, - "acc,exam_id__2010": 0.18803418803418803, - "acc,exam_id__2013": 0.19444444444444445, - "acc,exam_id__2014": 0.11926605504587157, - "acc,exam_id__2022": 0.21052631578947367, - "acc,exam_id__2023": 0.18518518518518517 - }, - "faquad_nli": { - "f1_macro,all": 0.45367132867132864, - "acc,all": 0.7692307692307693, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.3558806058806059, - "acc,all": 0.5042857142857143 - }, - "oab_exams": { - "acc,all": 0.23097949886104785, - "acc,exam_id__2011-03": 0.26262626262626265, - "acc,exam_id__2014-13": 0.2375, - "acc,exam_id__2013-10": 0.25, - "acc,exam_id__2017-24": 0.2125, - "acc,exam_id__2017-22": 0.2375, - "acc,exam_id__2012-06a": 0.225, - "acc,exam_id__2016-20a": 0.2875, - "acc,exam_id__2012-09": 0.2597402597402597, - "acc,exam_id__2015-16": 0.25, - "acc,exam_id__2011-04": 0.2, - "acc,exam_id__2012-07": 0.225, - "acc,exam_id__2014-14": 0.2375, - "acc,exam_id__2014-15": 0.19230769230769232, - "acc,exam_id__2010-02": 0.25, - "acc,exam_id__2015-18": 0.25, - "acc,exam_id__2016-19": 0.19230769230769232, - "acc,exam_id__2012-06": 0.2375, - "acc,exam_id__2013-12": 0.225, - "acc,exam_id__2011-05": 0.2125, - "acc,exam_id__2017-23": 0.2, - "acc,exam_id__2013-11": 0.225, - "acc,exam_id__2016-20": 0.225, - "acc,exam_id__2016-21": 0.2125, - "acc,exam_id__2018-25": 0.2875, - "acc,exam_id__2010-01": 0.24705882352941178, - "acc,exam_id__2012-08": 0.225, - "acc,exam_id__2015-17": 0.15384615384615385, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.34895439657344424, - "acc,all": 0.6698002350176263 - }, - "tweetsentbr": { - "f1_macro,all": 0.22843538291107046, - "acc,all": 0.4572139303482587, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.4105763517528223, + "acc,all": 0.4934640522875817, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.09183354777296178, + "mse,all": 2.153018790849673, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.18636995827538247, + "acc,exam_id__UNICAMP_2018": 0.2037037037037037, + "acc,exam_id__UNICAMP_2023": 0.3023255813953488, + "acc,exam_id__USP_2023": 0.11363636363636363, + "acc,exam_id__UNICAMP_2024": 0.17777777777777778, + "acc,exam_id__USP_2024": 0.14634146341463414, + "acc,exam_id__UNICAMP_2021_1": 0.2826086956521739, + "acc,exam_id__USP_2020": 0.19642857142857142, + "acc,exam_id__UNICAMP_2020": 0.16363636363636364, + "acc,exam_id__UNICAMP_2022": 0.20512820512820512, + "acc,exam_id__UNICAMP_2019": 0.16, + "acc,exam_id__UNICAMP_2021_2": 0.13725490196078433, + "acc,exam_id__USP_2018": 0.1111111111111111, + "acc,exam_id__USP_2021": 0.15384615384615385, + "acc,exam_id__USP_2019": 0.275, + "acc,exam_id__USP_2022": 0.20408163265306123, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.1847445766270119, + "acc,exam_id__2011": 0.1623931623931624, + "acc,exam_id__2017": 0.21551724137931033, + "acc,exam_id__2015": 0.13445378151260504, + "acc,exam_id__2016": 0.15702479338842976, + "acc,exam_id__2016_2": 0.1951219512195122, + "acc,exam_id__2009": 0.20869565217391303, + "acc,exam_id__2012": 0.2413793103448276, + "acc,exam_id__2010": 0.18803418803418803, + "acc,exam_id__2013": 0.19444444444444445, + "acc,exam_id__2014": 0.11926605504587157, + "acc,exam_id__2022": 0.21052631578947367, + "acc,exam_id__2023": 0.18518518518518517 + }, + "faquad_nli": { + "f1_macro,all": 0.45367132867132864, + "acc,all": 0.7692307692307693, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.3558806058806059, + "acc,all": 0.5042857142857143 + }, + "oab_exams": { + "acc,all": 0.23097949886104785, + "acc,exam_id__2011-03": 0.26262626262626265, + "acc,exam_id__2014-13": 0.2375, + "acc,exam_id__2013-10": 0.25, + "acc,exam_id__2017-24": 0.2125, + "acc,exam_id__2017-22": 0.2375, + "acc,exam_id__2012-06a": 0.225, + "acc,exam_id__2016-20a": 0.2875, + "acc,exam_id__2012-09": 0.2597402597402597, + "acc,exam_id__2015-16": 0.25, + "acc,exam_id__2011-04": 0.2, + "acc,exam_id__2012-07": 0.225, + "acc,exam_id__2014-14": 0.2375, + "acc,exam_id__2014-15": 0.19230769230769232, + "acc,exam_id__2010-02": 0.25, + "acc,exam_id__2015-18": 0.25, + "acc,exam_id__2016-19": 0.19230769230769232, + "acc,exam_id__2012-06": 0.2375, + "acc,exam_id__2013-12": 0.225, + "acc,exam_id__2011-05": 0.2125, + "acc,exam_id__2017-23": 0.2, + "acc,exam_id__2013-11": 
0.225, + "acc,exam_id__2016-20": 0.225, + "acc,exam_id__2016-21": 0.2125, + "acc,exam_id__2018-25": 0.2875, + "acc,exam_id__2010-01": 0.24705882352941178, + "acc,exam_id__2012-08": 0.225, + "acc,exam_id__2015-17": 0.15384615384615385, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.5234315948601663, + "acc,all": 0.6698002350176263 + }, + "tweetsentbr": { + "f1_macro,all": 0.22843538291107046, + "acc,all": 0.4572139303482587, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "da5637dfee0566a9b0492cf2ede1d669fc3e98e9", - "model_dtype": "torch.float16", - "model_memory_footprint": 2200099584, - "model_num_parameters": 1100048384, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1580.9889705882354, - "min_seq_length": 1558, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1789.9889705882354, - "min_seq_length": 1767, - "max_seq_length": 1856, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1741.7426981919332, - "min_seq_length": 1375, - "max_seq_length": 2501, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 
1620.9881035689293, - "min_seq_length": 1368, - "max_seq_length": 2660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1788.1184615384616, - "min_seq_length": 1736, - "max_seq_length": 1895, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "da5637dfee0566a9b0492cf2ede1d669fc3e98e9", + "model_dtype": "torch.float16", + "model_memory_footprint": 2200099584, + "model_num_parameters": 1100048384, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1715.9178571428572, - "min_seq_length": 1692, - "max_seq_length": 1962, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1393.4145785876992, - "min_seq_length": 1138, - "max_seq_length": 1875, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1580.9889705882354, + "min_seq_length": 1558, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1789.9889705882354, + "min_seq_length": 1767, + "max_seq_length": 1856, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1741.7426981919332, + "min_seq_length": 1375, + "max_seq_length": 2501, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.9881035689293, + "min_seq_length": 1368, + "max_seq_length": 2660, + "max_ctx_length": 
2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1788.1184615384616, + "min_seq_length": 1736, + "max_seq_length": 1895, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1715.9178571428572, + "min_seq_length": 1692, + "max_seq_length": 1962, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1393.4145785876992, + "min_seq_length": 1138, + "max_seq_length": 1875, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2207.801410105758, + "min_seq_length": 2173, + "max_seq_length": 2251, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1982.6845771144278, + "min_seq_length": 1961, + "max_seq_length": 2100, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2207.801410105758, - "min_seq_length": 2173, - "max_seq_length": 2251, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Ramikan-BR/tinyllama-coder-py-4bit-v10,dtype=float16,device=cuda:0,revision=da5637d,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1982.6845771144278, - "min_seq_length": 1961, - "max_seq_length": 2100, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Ramikan-BR/tinyllama-coder-py-4bit-v10,dtype=float16,device=cuda:0,revision=da5637d,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - 
"bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/Ramikan-BR/tinyllama-coder-py-4bit-v10/results_2024-05-29T13-03-10.327337.json b/Ramikan-BR/tinyllama-coder-py-4bit-v10/results_2024-05-29T13-03-10.327337.json index 32b3befd7753deaf50e644adcb28b003c80121e4..01e3b9a106917f948fe4054ef8497ced0decdf60 100644 --- a/Ramikan-BR/tinyllama-coder-py-4bit-v10/results_2024-05-29T13-03-10.327337.json +++ b/Ramikan-BR/tinyllama-coder-py-4bit-v10/results_2024-05-29T13-03-10.327337.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.2768272941472973, - "all_grouped_npm": -0.0968419624453188, + "all_grouped_average": 0.29621364951248863, + "all_grouped_npm": -0.05963206731059453, "all_grouped": { "enem_challenge": 0.1847445766270119, "bluex": 0.18636995827538247, @@ -44,7 +44,7 @@ "assin2_sts": 0.09183354777296178, "faquad_nli": 0.45367132867132864, "hatebr_offensive": 0.3558806058806059, - "portuguese_hate_speech": 0.34895439657344424, + "portuguese_hate_speech": 0.5234315948601663, "tweetsentbr": 0.22843538291107046 }, "all": { @@ -55,7 +55,7 @@ "harness|assin2_sts|assin2_sts|None|15": 0.09183354777296178, "harness|faquad_nli|faquad_nli|None|15": 0.45367132867132864, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.3558806058806059, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.34895439657344424, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.5234315948601663, "harness|tweetsentbr|tweetsentbr|None|25": 0.22843538291107046 }, "harness|enem_challenge|enem_challenge|None|3": { @@ -145,9 +145,9 @@ "main_score": 0.3558806058806059 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.34895439657344424, + "f1_macro,all": 0.5234315948601663, "acc,all": 0.6698002350176263, - "main_score": 0.34895439657344424 + "main_score": 0.5234315948601663 }, "harness|tweetsentbr|tweetsentbr|None|25": { "f1_macro,all": 0.22843538291107046, diff --git a/RubielLabarta/LogoS-7Bx2-MoE-13B-v0.2/raw_2024-07-04T01-29-22.119742/results.json b/RubielLabarta/LogoS-7Bx2-MoE-13B-v0.2/raw_2024-07-04T01-29-22.119742/results.json index 82c6fd4329454d29e6c299b880890fe897f28ea7..4bdc7273e432fd4a8a807be6cad7c62854be8977 100644 --- a/RubielLabarta/LogoS-7Bx2-MoE-13B-v0.2/raw_2024-07-04T01-29-22.119742/results.json +++ b/RubielLabarta/LogoS-7Bx2-MoE-13B-v0.2/raw_2024-07-04T01-29-22.119742/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9264698029582955, - "acc,all": 0.9264705882352942, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7797640483645831, - "mse,all": 0.42933823529411763, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5340751043115438, - "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, - "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, - "acc,exam_id__UNICAMP_2022": 0.5384615384615384, - "acc,exam_id__USP_2022": 0.4897959183673469, - "acc,exam_id__USP_2020": 0.48214285714285715, - "acc,exam_id__USP_2019": 0.45, - "acc,exam_id__UNICAMP_2024": 0.4444444444444444, - "acc,exam_id__USP_2021": 0.5384615384615384, - "acc,exam_id__UNICAMP_2023": 0.5813953488372093, - "acc,exam_id__UNICAMP_2019": 0.6, - "acc,exam_id__USP_2024": 0.7073170731707317, - "acc,exam_id__USP_2018": 0.46296296296296297, - "acc,exam_id__UNICAMP_2018": 0.4444444444444444, - "acc,exam_id__USP_2023": 0.6590909090909091, - "acc,exam_id__UNICAMP_2020": 0.5818181818181818, - "alias": "bluex" 
- }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6487053883834849, - "acc,exam_id__2015": 0.6218487394957983, - "acc,exam_id__2010": 0.6837606837606838, - "acc,exam_id__2022": 0.6165413533834586, - "acc,exam_id__2009": 0.6086956521739131, - "acc,exam_id__2011": 0.7008547008547008, - "acc,exam_id__2023": 0.6666666666666666, - "acc,exam_id__2016_2": 0.5934959349593496, - "acc,exam_id__2017": 0.6982758620689655, - "acc,exam_id__2014": 0.6146788990825688, - "acc,exam_id__2016": 0.628099173553719, - "acc,exam_id__2012": 0.6379310344827587, - "acc,exam_id__2013": 0.7222222222222222 - }, - "faquad_nli": { - "f1_macro,all": 0.7802933673469388, - "acc,all": 0.8369230769230769, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7963636363636364, - "acc,all": 0.8028571428571428 - }, - "oab_exams": { - "acc,all": 0.4232346241457859, - "acc,exam_id__2016-20": 0.4125, - "acc,exam_id__2017-24": 0.3625, - "acc,exam_id__2011-03": 0.35353535353535354, - "acc,exam_id__2010-02": 0.46, - "acc,exam_id__2016-20a": 0.4125, - "acc,exam_id__2013-11": 0.4375, - "acc,exam_id__2017-22": 0.525, - "acc,exam_id__2017-23": 0.425, - "acc,exam_id__2016-19": 0.5256410256410257, - "acc,exam_id__2015-17": 0.5641025641025641, - "acc,exam_id__2014-14": 0.5, - "acc,exam_id__2014-15": 0.48717948717948717, - "acc,exam_id__2013-10": 0.425, - "acc,exam_id__2012-09": 0.3246753246753247, - "acc,exam_id__2011-05": 0.4375, - "acc,exam_id__2011-04": 0.4125, - "acc,exam_id__2012-06a": 0.3625, - "acc,exam_id__2013-12": 0.4625, - "acc,exam_id__2012-07": 0.3375, - "acc,exam_id__2012-08": 0.3625, - "acc,exam_id__2012-06": 0.4625, - "acc,exam_id__2016-21": 0.375, - "acc,exam_id__2010-01": 0.4235294117647059, - "acc,exam_id__2015-16": 0.375, - "acc,exam_id__2014-13": 0.3, - "acc,exam_id__2015-18": 0.45, - "acc,exam_id__2018-25": 0.4625, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6980102828187797, - "acc,all": 0.7473560517038778 - }, - "tweetsentbr": { - "f1_macro,all": 0.4922037957590686, - "acc,all": 0.7019900497512438, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9264698029582955, + "acc,all": 0.9264705882352942, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7797640483645831, + "mse,all": 0.42933823529411763, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5340751043115438, + "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, + "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, + "acc,exam_id__UNICAMP_2022": 0.5384615384615384, + "acc,exam_id__USP_2022": 0.4897959183673469, + "acc,exam_id__USP_2020": 0.48214285714285715, + "acc,exam_id__USP_2019": 0.45, + "acc,exam_id__UNICAMP_2024": 0.4444444444444444, + "acc,exam_id__USP_2021": 0.5384615384615384, + "acc,exam_id__UNICAMP_2023": 0.5813953488372093, + "acc,exam_id__UNICAMP_2019": 0.6, + "acc,exam_id__USP_2024": 0.7073170731707317, + "acc,exam_id__USP_2018": 0.46296296296296297, + "acc,exam_id__UNICAMP_2018": 0.4444444444444444, + "acc,exam_id__USP_2023": 0.6590909090909091, + "acc,exam_id__UNICAMP_2020": 0.5818181818181818, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6487053883834849, + "acc,exam_id__2015": 0.6218487394957983, + "acc,exam_id__2010": 0.6837606837606838, + "acc,exam_id__2022": 0.6165413533834586, + "acc,exam_id__2009": 0.6086956521739131, + "acc,exam_id__2011": 0.7008547008547008, + "acc,exam_id__2023": 0.6666666666666666, + "acc,exam_id__2016_2": 0.5934959349593496, + "acc,exam_id__2017": 0.6982758620689655, + "acc,exam_id__2014": 0.6146788990825688, + "acc,exam_id__2016": 0.628099173553719, + "acc,exam_id__2012": 0.6379310344827587, + "acc,exam_id__2013": 0.7222222222222222 + }, + "faquad_nli": { + "f1_macro,all": 0.7802933673469388, + "acc,all": 0.8369230769230769, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7963636363636364, + "acc,all": 0.8028571428571428 + }, + "oab_exams": { + "acc,all": 0.4232346241457859, + "acc,exam_id__2016-20": 0.4125, + "acc,exam_id__2017-24": 0.3625, + "acc,exam_id__2011-03": 0.35353535353535354, + "acc,exam_id__2010-02": 0.46, + "acc,exam_id__2016-20a": 0.4125, + "acc,exam_id__2013-11": 0.4375, + "acc,exam_id__2017-22": 0.525, + "acc,exam_id__2017-23": 0.425, + "acc,exam_id__2016-19": 0.5256410256410257, + "acc,exam_id__2015-17": 0.5641025641025641, + "acc,exam_id__2014-14": 0.5, + "acc,exam_id__2014-15": 0.48717948717948717, + "acc,exam_id__2013-10": 0.425, + "acc,exam_id__2012-09": 0.3246753246753247, + "acc,exam_id__2011-05": 0.4375, + "acc,exam_id__2011-04": 0.4125, + "acc,exam_id__2012-06a": 0.3625, + "acc,exam_id__2013-12": 0.4625, + "acc,exam_id__2012-07": 0.3375, + "acc,exam_id__2012-08": 0.3625, + "acc,exam_id__2012-06": 
0.4625, + "acc,exam_id__2016-21": 0.375, + "acc,exam_id__2010-01": 0.4235294117647059, + "acc,exam_id__2015-16": 0.375, + "acc,exam_id__2014-13": 0.3, + "acc,exam_id__2015-18": 0.45, + "acc,exam_id__2018-25": 0.4625, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6980102828187797, + "acc,all": 0.7473560517038778 + }, + "tweetsentbr": { + "f1_macro,all": 0.6562717276787581, + "acc,all": 0.7019900497512438, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "fb0f72b9914a81892bfeea5a04fcd9676c883d64", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 26295156736, - "model_num_parameters": 12879138816, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 
1620.039188243527, - "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "fb0f72b9914a81892bfeea5a04fcd9676c883d64", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 26295156736, + "model_num_parameters": 12879138816, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=RubielLabarta/LogoS-7Bx2-MoE-13B-v0.2,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=RubielLabarta/LogoS-7Bx2-MoE-13B-v0.2,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - 
"gen_kwargs": null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/RubielLabarta/LogoS-7Bx2-MoE-13B-v0.2/results_2024-07-04T01-29-22.119742.json b/RubielLabarta/LogoS-7Bx2-MoE-13B-v0.2/results_2024-07-04T01-29-22.119742.json index af43a245b95ee1066c8326e4b6d092b5fbf40a81..d0ea33cad8eede71a9342d750c2061243cfd2395 100644 --- a/RubielLabarta/LogoS-7Bx2-MoE-13B-v0.2/results_2024-07-04T01-29-22.119742.json +++ b/RubielLabarta/LogoS-7Bx2-MoE-13B-v0.2/results_2024-07-04T01-29-22.119742.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6754577833835684, - "all_grouped_npm": 0.5196602637498469, + "all_grouped_average": 0.6936875535968673, + "all_grouped_npm": 0.5467878979958272, "all_grouped": { "enem_challenge": 0.6487053883834849, "bluex": 0.5340751043115438, @@ -45,7 +45,7 @@ "faquad_nli": 0.7802933673469388, "hatebr_offensive": 0.7963636363636364, "portuguese_hate_speech": 0.6980102828187797, - "tweetsentbr": 0.4922037957590686 + "tweetsentbr": 0.6562717276787581 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6487053883834849, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7802933673469388, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7963636363636364, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6980102828187797, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4922037957590686 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6562717276787581 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6487053883834849, @@ -150,9 +150,9 @@ "main_score": 0.6980102828187797 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4922037957590686, + "f1_macro,all": 0.6562717276787581, "acc,all": 0.7019900497512438, - "main_score": 0.4922037957590686 + "main_score": 0.6562717276787581 } }, "config_tasks": { diff --git a/SeaLLMs/SeaLLM-7B-v2/raw_2024-08-08T05-46-17.568983/results.json b/SeaLLMs/SeaLLM-7B-v2/raw_2024-08-08T05-46-17.568983/results.json index 83eb397e34f78bb1f88a4f0dd5f09788b53485b2..80c2ff4e28e181c9acab06437fb0700cd576063d 100644 --- a/SeaLLMs/SeaLLM-7B-v2/raw_2024-08-08T05-46-17.568983/results.json +++ b/SeaLLMs/SeaLLM-7B-v2/raw_2024-08-08T05-46-17.568983/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.910123460086579, - "acc,all": 0.9101307189542484, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7586077181075638, - "mse,all": 0.5324959150326797, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5173852573018081, - "acc,exam_id__UNICAMP_2018": 0.3888888888888889, - "acc,exam_id__UNICAMP_2019": 0.48, - "acc,exam_id__UNICAMP_2022": 0.6410256410256411, - "acc,exam_id__UNICAMP_2021_1": 0.5, - "acc,exam_id__UNICAMP_2020": 0.41818181818181815, - "acc,exam_id__USP_2018": 0.48148148148148145, - "acc,exam_id__UNICAMP_2023": 0.4883720930232558, - "acc,exam_id__USP_2020": 0.5178571428571429, - "acc,exam_id__UNICAMP_2024": 0.5111111111111111, - "acc,exam_id__USP_2022": 0.5102040816326531, - "acc,exam_id__USP_2019": 0.575, - "acc,exam_id__USP_2021": 0.5384615384615384, - "acc,exam_id__USP_2024": 0.6585365853658537, - "acc,exam_id__USP_2023": 0.6590909090909091, - "acc,exam_id__UNICAMP_2021_2": 0.49019607843137253, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6403079076277117, - "acc,exam_id__2013": 0.6759259259259259, - "acc,exam_id__2011": 0.7264957264957265, - "acc,exam_id__2014": 0.6422018348623854, - "acc,exam_id__2016": 
0.6033057851239669, - "acc,exam_id__2016_2": 0.6341463414634146, - "acc,exam_id__2023": 0.674074074074074, - "acc,exam_id__2022": 0.6240601503759399, - "acc,exam_id__2017": 0.6551724137931034, - "acc,exam_id__2015": 0.5882352941176471, - "acc,exam_id__2010": 0.5897435897435898, - "acc,exam_id__2012": 0.6551724137931034, - "acc,exam_id__2009": 0.6173913043478261 - }, - "faquad_nli": { - "f1_macro,all": 0.7842076261469852, - "acc,all": 0.8507692307692307, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8023504608685286, - "acc,all": 0.8071428571428572 - }, - "oab_exams": { - "acc,all": 0.4255125284738041, - "acc,exam_id__2012-07": 0.4125, - "acc,exam_id__2011-04": 0.3625, - "acc,exam_id__2012-08": 0.4375, - "acc,exam_id__2017-24": 0.4, - "acc,exam_id__2015-16": 0.3375, - "acc,exam_id__2012-06a": 0.45, - "acc,exam_id__2015-18": 0.4625, - "acc,exam_id__2016-19": 0.46153846153846156, - "acc,exam_id__2011-05": 0.5, - "acc,exam_id__2010-02": 0.43, - "acc,exam_id__2017-23": 0.4375, - "acc,exam_id__2013-11": 0.4625, - "acc,exam_id__2016-20a": 0.425, - "acc,exam_id__2016-20": 0.4375, - "acc,exam_id__2011-03": 0.41414141414141414, - "acc,exam_id__2015-17": 0.5256410256410257, - "acc,exam_id__2014-14": 0.575, - "acc,exam_id__2013-10": 0.45, - "acc,exam_id__2017-22": 0.525, - "acc,exam_id__2014-15": 0.48717948717948717, - "acc,exam_id__2013-12": 0.45, - "acc,exam_id__2014-13": 0.35, - "acc,exam_id__2012-09": 0.3116883116883117, - "acc,exam_id__2018-25": 0.3875, - "acc,exam_id__2012-06": 0.3625, - "acc,exam_id__2016-21": 0.275, - "acc,exam_id__2010-01": 0.36470588235294116, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6971031649588798, - "acc,all": 0.7332549941245593 - }, - "tweetsentbr": { - "f1_macro,all": 0.4483979151220403, - "acc,all": 0.6766169154228856, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.910123460086579, + "acc,all": 0.9101307189542484, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7586077181075638, + "mse,all": 0.5324959150326797, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5173852573018081, + "acc,exam_id__UNICAMP_2018": 0.3888888888888889, + "acc,exam_id__UNICAMP_2019": 0.48, + "acc,exam_id__UNICAMP_2022": 0.6410256410256411, + "acc,exam_id__UNICAMP_2021_1": 0.5, + "acc,exam_id__UNICAMP_2020": 0.41818181818181815, + "acc,exam_id__USP_2018": 0.48148148148148145, + "acc,exam_id__UNICAMP_2023": 0.4883720930232558, + "acc,exam_id__USP_2020": 0.5178571428571429, + "acc,exam_id__UNICAMP_2024": 0.5111111111111111, + "acc,exam_id__USP_2022": 0.5102040816326531, + "acc,exam_id__USP_2019": 0.575, + "acc,exam_id__USP_2021": 0.5384615384615384, + "acc,exam_id__USP_2024": 0.6585365853658537, + "acc,exam_id__USP_2023": 0.6590909090909091, + "acc,exam_id__UNICAMP_2021_2": 0.49019607843137253, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6403079076277117, + "acc,exam_id__2013": 0.6759259259259259, + "acc,exam_id__2011": 0.7264957264957265, + "acc,exam_id__2014": 0.6422018348623854, + "acc,exam_id__2016": 0.6033057851239669, + "acc,exam_id__2016_2": 0.6341463414634146, + "acc,exam_id__2023": 0.674074074074074, + "acc,exam_id__2022": 0.6240601503759399, + "acc,exam_id__2017": 0.6551724137931034, + "acc,exam_id__2015": 0.5882352941176471, + "acc,exam_id__2010": 0.5897435897435898, + "acc,exam_id__2012": 0.6551724137931034, + "acc,exam_id__2009": 0.6173913043478261 + }, + "faquad_nli": { + "f1_macro,all": 0.7842076261469852, + "acc,all": 0.8507692307692307, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8023504608685286, + "acc,all": 0.8071428571428572 + }, + "oab_exams": { + "acc,all": 0.4255125284738041, + "acc,exam_id__2012-07": 0.4125, + "acc,exam_id__2011-04": 0.3625, + "acc,exam_id__2012-08": 0.4375, + "acc,exam_id__2017-24": 0.4, + "acc,exam_id__2015-16": 0.3375, + "acc,exam_id__2012-06a": 0.45, + "acc,exam_id__2015-18": 0.4625, + "acc,exam_id__2016-19": 0.46153846153846156, + "acc,exam_id__2011-05": 0.5, + "acc,exam_id__2010-02": 0.43, + "acc,exam_id__2017-23": 0.4375, + "acc,exam_id__2013-11": 0.4625, + "acc,exam_id__2016-20a": 0.425, + "acc,exam_id__2016-20": 0.4375, + "acc,exam_id__2011-03": 0.41414141414141414, + "acc,exam_id__2015-17": 0.5256410256410257, + "acc,exam_id__2014-14": 0.575, + "acc,exam_id__2013-10": 0.45, + "acc,exam_id__2017-22": 0.525, + "acc,exam_id__2014-15": 0.48717948717948717, + "acc,exam_id__2013-12": 0.45, + "acc,exam_id__2014-13": 0.35, 
+ "acc,exam_id__2012-09": 0.3116883116883117, + "acc,exam_id__2018-25": 0.3875, + "acc,exam_id__2012-06": 0.3625, + "acc,exam_id__2016-21": 0.275, + "acc,exam_id__2010-01": 0.36470588235294116, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6971031649588798, + "acc,all": 0.7332549941245593 + }, + "tweetsentbr": { + "f1_macro,all": 0.597863886829387, + "acc,all": 0.6766169154228856, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "04d5d6102eb0865f4a0ca55fe8d12478605748f8", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 14751907840, - "model_num_parameters": 7375949824, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1675.2581699346406, - "min_seq_length": 1651, - "max_seq_length": 1742, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1884.2581699346406, - "min_seq_length": 1860, - "max_seq_length": 1951, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1787.2531293463144, - "min_seq_length": 1413, - "max_seq_length": 2582, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1693.5045486354093, - "min_seq_length": 1427, - "max_seq_length": 2694, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1900.2276923076922, - "min_seq_length": 1845, - "max_seq_length": 2019, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "04d5d6102eb0865f4a0ca55fe8d12478605748f8", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 14751907840, + "model_num_parameters": 7375949824, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1801.1064285714285, - "min_seq_length": 1778, - "max_seq_length": 2049, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1442.24145785877, - "min_seq_length": 1177, - "max_seq_length": 1934, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1675.2581699346406, + "min_seq_length": 1651, + "max_seq_length": 1742, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1884.2581699346406, + "min_seq_length": 1860, + "max_seq_length": 1951, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1787.2531293463144, + "min_seq_length": 1413, + "max_seq_length": 2582, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1693.5045486354093, + "min_seq_length": 1427, + 
"max_seq_length": 2694, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1900.2276923076922, + "min_seq_length": 1845, + "max_seq_length": 2019, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1801.1064285714285, + "min_seq_length": 1778, + "max_seq_length": 2049, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1442.24145785877, + "min_seq_length": 1177, + "max_seq_length": 1934, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2295.840188014101, + "min_seq_length": 2260, + "max_seq_length": 2335, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 2048.842288557214, + "min_seq_length": 2028, + "max_seq_length": 2143, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2295.840188014101, - "min_seq_length": 2260, - "max_seq_length": 2335, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=SeaLLMs/SeaLLM-7B-v2,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 2048.842288557214, - "min_seq_length": 2028, - "max_seq_length": 2143, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=SeaLLMs/SeaLLM-7B-v2,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - 
"bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/SeaLLMs/SeaLLM-7B-v2/results_2024-08-08T05-46-17.568983.json b/SeaLLMs/SeaLLM-7B-v2/results_2024-08-08T05-46-17.568983.json index 02f6fe82529210101bccd7dbf10378454a81d8d1..c7f303bd56c128e9e90901dd0526a87195b57375 100644 --- a/SeaLLMs/SeaLLM-7B-v2/results_2024-08-08T05-46-17.568983.json +++ b/SeaLLMs/SeaLLM-7B-v2/results_2024-08-08T05-46-17.568983.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6648884487437667, - "all_grouped_npm": 0.5051487746981883, + "all_grouped_average": 0.6814957789334719, + "all_grouped_npm": 0.529862063670964, "all_grouped": { "enem_challenge": 0.6403079076277117, "bluex": 0.5173852573018081, @@ -45,7 +45,7 @@ "faquad_nli": 0.7842076261469852, "hatebr_offensive": 0.8023504608685286, "portuguese_hate_speech": 0.6971031649588798, - "tweetsentbr": 0.4483979151220403 + "tweetsentbr": 0.597863886829387 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6403079076277117, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7842076261469852, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8023504608685286, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6971031649588798, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4483979151220403 + "harness|tweetsentbr|tweetsentbr|None|25": 0.597863886829387 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6403079076277117, @@ -150,9 +150,9 @@ "main_score": 0.6971031649588798 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4483979151220403, + "f1_macro,all": 0.597863886829387, "acc,all": 0.6766169154228856, - "main_score": 0.4483979151220403 + "main_score": 0.597863886829387 } }, "config_tasks": { diff --git a/THUDM/chatglm3-6b/raw_2024-04-05T15-27-35.144602/results.json b/THUDM/chatglm3-6b/raw_2024-04-05T15-27-35.144602/results.json index 1ac541fc90e17e5c90d2cfd06b817bf3b832f2ea..973ee7b3cd7d9912c0ed241d39b3d133dc9f588b 100644 --- a/THUDM/chatglm3-6b/raw_2024-04-05T15-27-35.144602/results.json +++ b/THUDM/chatglm3-6b/raw_2024-04-05T15-27-35.144602/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.5540993203540044, - "acc,all": 0.8312908496732027, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.578670198377361, - "mse,all": 1.1979901960784314, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.3560500695410292, - "acc,exam_id__UNICAMP_2018": 0.3148148148148148, - "acc,exam_id__USP_2018": 0.3333333333333333, - "acc,exam_id__UNICAMP_2021_2": 0.3137254901960784, - "acc,exam_id__USP_2019": 0.25, - "acc,exam_id__USP_2022": 0.2857142857142857, - "acc,exam_id__UNICAMP_2024": 0.4666666666666667, - "acc,exam_id__UNICAMP_2019": 0.3, - "acc,exam_id__UNICAMP_2022": 0.4358974358974359, - "acc,exam_id__USP_2021": 0.38461538461538464, - "acc,exam_id__UNICAMP_2021_1": 0.3695652173913043, - "acc,exam_id__USP_2020": 0.39285714285714285, - "acc,exam_id__USP_2023": 0.4090909090909091, - "acc,exam_id__UNICAMP_2023": 0.37209302325581395, - "acc,exam_id__USP_2024": 0.36585365853658536, - "acc,exam_id__UNICAMP_2020": 0.36363636363636365, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.43037088873338, - "acc,exam_id__2012": 0.41379310344827586, - "acc,exam_id__2010": 0.4017094017094017, - "acc,exam_id__2011": 0.42735042735042733, - "acc,exam_id__2009": 0.41739130434782606, - "acc,exam_id__2014": 
0.41284403669724773, - "acc,exam_id__2016": 0.45454545454545453, - "acc,exam_id__2017": 0.4827586206896552, - "acc,exam_id__2022": 0.40601503759398494, - "acc,exam_id__2023": 0.4148148148148148, - "acc,exam_id__2016_2": 0.3983739837398374, - "acc,exam_id__2015": 0.4789915966386555, - "acc,exam_id__2013": 0.46296296296296297 - }, - "faquad_nli": { - "f1_macro,all": 0.44678749965564074, - "acc,all": 0.7538461538461538, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8026874828971815, - "acc,all": 0.8042857142857143 - }, - "oab_exams": { - "acc,all": 0.32938496583143506, - "acc,exam_id__2011-04": 0.325, - "acc,exam_id__2010-02": 0.4, - "acc,exam_id__2015-16": 0.3625, - "acc,exam_id__2016-20a": 0.3375, - "acc,exam_id__2011-05": 0.4, - "acc,exam_id__2012-07": 0.3875, - "acc,exam_id__2014-13": 0.3375, - "acc,exam_id__2010-01": 0.3176470588235294, - "acc,exam_id__2014-15": 0.32051282051282054, - "acc,exam_id__2016-19": 0.34615384615384615, - "acc,exam_id__2016-21": 0.3, - "acc,exam_id__2015-18": 0.2875, - "acc,exam_id__2011-03": 0.26262626262626265, - "acc,exam_id__2014-14": 0.3625, - "acc,exam_id__2016-20": 0.2875, - "acc,exam_id__2013-12": 0.3, - "acc,exam_id__2017-23": 0.3125, - "acc,exam_id__2012-06a": 0.35, - "acc,exam_id__2015-17": 0.28205128205128205, - "acc,exam_id__2012-06": 0.2875, - "acc,exam_id__2012-08": 0.3625, - "acc,exam_id__2013-10": 0.325, - "acc,exam_id__2018-25": 0.2875, - "acc,exam_id__2012-09": 0.33766233766233766, - "acc,exam_id__2013-11": 0.35, - "acc,exam_id__2017-22": 0.375, - "acc,exam_id__2017-24": 0.2875, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.5543391521197008, - "acc,all": 0.5558166862514688 - }, - "tweetsentbr": { - "f1_macro,all": 0.48732305784857605, - "acc,all": 0.5686567164179105, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.8311489805310066, + "acc,all": 0.8312908496732027, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.578670198377361, + "mse,all": 1.1979901960784314, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.3560500695410292, + "acc,exam_id__UNICAMP_2018": 0.3148148148148148, + "acc,exam_id__USP_2018": 0.3333333333333333, + "acc,exam_id__UNICAMP_2021_2": 0.3137254901960784, + "acc,exam_id__USP_2019": 0.25, + "acc,exam_id__USP_2022": 0.2857142857142857, + "acc,exam_id__UNICAMP_2024": 0.4666666666666667, + "acc,exam_id__UNICAMP_2019": 0.3, + "acc,exam_id__UNICAMP_2022": 0.4358974358974359, + "acc,exam_id__USP_2021": 0.38461538461538464, + "acc,exam_id__UNICAMP_2021_1": 0.3695652173913043, + "acc,exam_id__USP_2020": 0.39285714285714285, + "acc,exam_id__USP_2023": 0.4090909090909091, + "acc,exam_id__UNICAMP_2023": 0.37209302325581395, + "acc,exam_id__USP_2024": 0.36585365853658536, + "acc,exam_id__UNICAMP_2020": 0.36363636363636365, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.43037088873338, + "acc,exam_id__2012": 0.41379310344827586, + "acc,exam_id__2010": 0.4017094017094017, + "acc,exam_id__2011": 0.42735042735042733, + "acc,exam_id__2009": 0.41739130434782606, + "acc,exam_id__2014": 0.41284403669724773, + "acc,exam_id__2016": 0.45454545454545453, + "acc,exam_id__2017": 0.4827586206896552, + "acc,exam_id__2022": 0.40601503759398494, + "acc,exam_id__2023": 0.4148148148148148, + "acc,exam_id__2016_2": 0.3983739837398374, + "acc,exam_id__2015": 0.4789915966386555, + "acc,exam_id__2013": 0.46296296296296297 + }, + "faquad_nli": { + "f1_macro,all": 0.6701812494834612, + "acc,all": 0.7538461538461538, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8026874828971815, + "acc,all": 0.8042857142857143 + }, + "oab_exams": { + "acc,all": 0.32938496583143506, + "acc,exam_id__2011-04": 0.325, + "acc,exam_id__2010-02": 0.4, + "acc,exam_id__2015-16": 0.3625, + "acc,exam_id__2016-20a": 0.3375, + "acc,exam_id__2011-05": 0.4, + "acc,exam_id__2012-07": 0.3875, + "acc,exam_id__2014-13": 0.3375, + "acc,exam_id__2010-01": 0.3176470588235294, + "acc,exam_id__2014-15": 0.32051282051282054, + "acc,exam_id__2016-19": 0.34615384615384615, + "acc,exam_id__2016-21": 0.3, + "acc,exam_id__2015-18": 0.2875, + "acc,exam_id__2011-03": 0.26262626262626265, + "acc,exam_id__2014-14": 0.3625, + "acc,exam_id__2016-20": 0.2875, + "acc,exam_id__2013-12": 0.3, + "acc,exam_id__2017-23": 0.3125, + "acc,exam_id__2012-06a": 0.35, + "acc,exam_id__2015-17": 0.28205128205128205, + "acc,exam_id__2012-06": 0.2875, + "acc,exam_id__2012-08": 
0.3625, + "acc,exam_id__2013-10": 0.325, + "acc,exam_id__2018-25": 0.2875, + "acc,exam_id__2012-09": 0.33766233766233766, + "acc,exam_id__2013-11": 0.35, + "acc,exam_id__2017-22": 0.375, + "acc,exam_id__2017-24": 0.2875, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.5543391521197008, + "acc,all": 0.5558166862514688 + }, + "tweetsentbr": { + "f1_macro,all": 0.48732305784857605, + "acc,all": 0.5686567164179105, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 223, - "non_truncated": 13927, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 280, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"103caa40027ebfd8450289ca2f278eac4ff26405", - "model_dtype": "torch.float16", - "model_memory_footprint": 12487168064, - "model_num_parameters": 6243584000, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 2048, - "max_ctx_length": 2016, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1488.0543300653594, - "min_seq_length": 1464, - "max_seq_length": 1558, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1693.0543300653594, - "min_seq_length": 1669, - "max_seq_length": 1763, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 144, - "non_truncated": 575, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 199, - "mean_seq_length": 1831.5452016689846, - "min_seq_length": 1434, - "max_seq_length": 2674, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.7232267037552154 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 79, - "non_truncated": 1350, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 81, - "mean_seq_length": 1717.410076976907, - "min_seq_length": 1436, - "max_seq_length": 2734, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9433170048985304 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1699.8892307692308, - "min_seq_length": 1641, - "max_seq_length": 1821, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1444.5421428571428, - "min_seq_length": 1419, - "max_seq_length": 1711, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 223, + "non_truncated": 13927, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 280, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "103caa40027ebfd8450289ca2f278eac4ff26405", + "model_dtype": "torch.float16", + "model_memory_footprint": 12487168064, + "model_num_parameters": 6243584000, + "model_is_loaded_in_4bit": null, + 
"model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + "max_length": 2048, + "max_ctx_length": 2016, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1474.1808656036446, - "min_seq_length": 1189, - "max_seq_length": 2000, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1488.0543300653594, + "min_seq_length": 1464, + "max_seq_length": 1558, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1693.0543300653594, + "min_seq_length": 1669, + "max_seq_length": 1763, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 144, + "non_truncated": 575, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 199, + "mean_seq_length": 1831.5452016689846, + "min_seq_length": 1434, + "max_seq_length": 2674, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.7232267037552154 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 79, + "non_truncated": 1350, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 81, + "mean_seq_length": 1717.410076976907, + "min_seq_length": 1436, + "max_seq_length": 2734, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9433170048985304 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1699.8892307692308, + "min_seq_length": 1641, + "max_seq_length": 1821, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1444.5421428571428, + "min_seq_length": 1419, + "max_seq_length": 1711, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1474.1808656036446, + "min_seq_length": 1189, + "max_seq_length": 2000, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1950.6580493537015, + "min_seq_length": 1914, + "max_seq_length": 1987, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + 
"mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1718.6363184079603, + "min_seq_length": 1697, + "max_seq_length": 1814, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1950.6580493537015, - "min_seq_length": 1914, - "max_seq_length": 1987, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=THUDM/chatglm3-6b,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1718.6363184079603, - "min_seq_length": 1697, - "max_seq_length": 1814, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=THUDM/chatglm3-6b,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": null + "git_hash": null } \ No newline at end of file diff --git a/THUDM/chatglm3-6b/results_2024-04-05T15-27-35.144602.json b/THUDM/chatglm3-6b/results_2024-04-05T15-27-35.144602.json index ee0ec5795c5a4dccd85ce0c196d6e43c0d17c773..eb7b72a920a2d66a84fcae2c6115b7567e369ad5 100644 --- a/THUDM/chatglm3-6b/results_2024-04-05T15-27-35.144602.json +++ b/THUDM/chatglm3-6b/results_2024-04-05T15-27-35.144602.json @@ -34,15 +34,15 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.5044125150398121, - "all_grouped_npm": 0.24665654886160196, + "all_grouped_average": 0.5600173383736813, + "all_grouped_npm": 0.3538509483919121, "all_grouped": { "enem_challenge": 0.43037088873338, "bluex": 0.3560500695410292, "oab_exams": 0.32938496583143506, - "assin2_rte": 0.5540993203540044, + "assin2_rte": 0.8311489805310066, "assin2_sts": 0.578670198377361, - "faquad_nli": 0.44678749965564074, + "faquad_nli": 0.6701812494834612, "hatebr_offensive": 0.8026874828971815, "portuguese_hate_speech": 0.5543391521197008, "tweetsentbr": 0.48732305784857605 @@ -51,9 +51,9 @@ "harness|enem_challenge|enem_challenge|None|3": 0.43037088873338, "harness|bluex|bluex|None|3": 0.3560500695410292, "harness|oab_exams|oab_exams|None|3": 0.32938496583143506, - "harness|assin2_rte|assin2_rte|None|15": 0.5540993203540044, + "harness|assin2_rte|assin2_rte|None|15": 0.8311489805310066, "harness|assin2_sts|assin2_sts|None|15": 0.578670198377361, - "harness|faquad_nli|faquad_nli|None|15": 0.44678749965564074, + "harness|faquad_nli|faquad_nli|None|15": 0.6701812494834612, 
"harness|hatebr_offensive|hatebr_offensive|None|25": 0.8026874828971815, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.5543391521197008, "harness|tweetsentbr|tweetsentbr|None|25": 0.48732305784857605 @@ -125,9 +125,9 @@ "main_score": 0.32938496583143506 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.5540993203540044, + "f1_macro,all": 0.8311489805310066, "acc,all": 0.8312908496732027, - "main_score": 0.5540993203540044 + "main_score": 0.8311489805310066 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.578670198377361, @@ -135,9 +135,9 @@ "main_score": 0.578670198377361 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.44678749965564074, + "f1_macro,all": 0.6701812494834612, "acc,all": 0.7538461538461538, - "main_score": 0.44678749965564074 + "main_score": 0.6701812494834612 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { "f1_macro,all": 0.8026874828971815, diff --git a/TheBloke/zephyr-7B-beta-GPTQ/raw_2024-05-29T07-56-34.401974/results.json b/TheBloke/zephyr-7B-beta-GPTQ/raw_2024-05-29T07-56-34.401974/results.json index 55f812677e4ea98a0f71f6f657ca9b19ca949004..247855f27c4ee35adc9ede4cebabe7d2ae4d2c4c 100644 --- a/TheBloke/zephyr-7B-beta-GPTQ/raw_2024-05-29T07-56-34.401974/results.json +++ b/TheBloke/zephyr-7B-beta-GPTQ/raw_2024-05-29T07-56-34.401974/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.5831734998147651, - "acc,all": 0.875, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.6848780483036166, - "mse,all": 0.6463398692810457, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.46870653685674546, - "acc,exam_id__UNICAMP_2018": 0.42592592592592593, - "acc,exam_id__UNICAMP_2023": 0.4418604651162791, - "acc,exam_id__USP_2023": 0.5454545454545454, - "acc,exam_id__UNICAMP_2024": 0.5111111111111111, - "acc,exam_id__USP_2024": 0.6585365853658537, - "acc,exam_id__UNICAMP_2021_1": 0.5, - "acc,exam_id__USP_2020": 0.48214285714285715, - "acc,exam_id__UNICAMP_2020": 0.4727272727272727, - "acc,exam_id__UNICAMP_2022": 0.5128205128205128, - "acc,exam_id__UNICAMP_2019": 0.46, - "acc,exam_id__UNICAMP_2021_2": 0.4117647058823529, - "acc,exam_id__USP_2018": 0.3888888888888889, - "acc,exam_id__USP_2021": 0.3076923076923077, - "acc,exam_id__USP_2019": 0.45, - "acc,exam_id__USP_2022": 0.5306122448979592, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.5374387683694891, - "acc,exam_id__2011": 0.6239316239316239, - "acc,exam_id__2017": 0.5172413793103449, - "acc,exam_id__2015": 0.5042016806722689, - "acc,exam_id__2016": 0.4628099173553719, - "acc,exam_id__2016_2": 0.5609756097560976, - "acc,exam_id__2009": 0.5565217391304348, - "acc,exam_id__2012": 0.5603448275862069, - "acc,exam_id__2010": 0.5555555555555556, - "acc,exam_id__2013": 0.5833333333333334, - "acc,exam_id__2014": 0.5688073394495413, - "acc,exam_id__2022": 0.48120300751879697, - "acc,exam_id__2023": 0.4962962962962963 - }, - "faquad_nli": { - "f1_macro,all": 0.7380971253434792, - "acc,all": 0.8123076923076923, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8330124462550406, - "acc,all": 0.8335714285714285 - }, - "oab_exams": { - "acc,all": 0.3826879271070615, - "acc,exam_id__2011-03": 0.36363636363636365, - "acc,exam_id__2014-13": 0.35, - "acc,exam_id__2013-10": 0.375, - "acc,exam_id__2017-24": 0.325, - "acc,exam_id__2017-22": 0.425, - "acc,exam_id__2012-06a": 0.3625, - "acc,exam_id__2016-20a": 0.2625, - 
"acc,exam_id__2012-09": 0.37662337662337664, - "acc,exam_id__2015-16": 0.3625, - "acc,exam_id__2011-04": 0.3375, - "acc,exam_id__2012-07": 0.3, - "acc,exam_id__2014-14": 0.3875, - "acc,exam_id__2014-15": 0.5384615384615384, - "acc,exam_id__2010-02": 0.41, - "acc,exam_id__2015-18": 0.4, - "acc,exam_id__2016-19": 0.5128205128205128, - "acc,exam_id__2012-06": 0.3875, - "acc,exam_id__2013-12": 0.4125, - "acc,exam_id__2011-05": 0.3875, - "acc,exam_id__2017-23": 0.4375, - "acc,exam_id__2013-11": 0.3875, - "acc,exam_id__2016-20": 0.375, - "acc,exam_id__2016-21": 0.35, - "acc,exam_id__2018-25": 0.375, - "acc,exam_id__2010-01": 0.3411764705882353, - "acc,exam_id__2012-08": 0.3625, - "acc,exam_id__2015-17": 0.4358974358974359, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6748662787280315, - "acc,all": 0.6944770857814336 - }, - "tweetsentbr": { - "f1_macro,all": 0.4270736435976256, - "acc,all": 0.6477611940298508, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.8747602497221476, + "acc,all": 0.875, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.6848780483036166, + "mse,all": 0.6463398692810457, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.46870653685674546, + "acc,exam_id__UNICAMP_2018": 0.42592592592592593, + "acc,exam_id__UNICAMP_2023": 0.4418604651162791, + "acc,exam_id__USP_2023": 0.5454545454545454, + "acc,exam_id__UNICAMP_2024": 0.5111111111111111, + "acc,exam_id__USP_2024": 0.6585365853658537, + "acc,exam_id__UNICAMP_2021_1": 0.5, + "acc,exam_id__USP_2020": 0.48214285714285715, + "acc,exam_id__UNICAMP_2020": 0.4727272727272727, + "acc,exam_id__UNICAMP_2022": 0.5128205128205128, + "acc,exam_id__UNICAMP_2019": 0.46, + "acc,exam_id__UNICAMP_2021_2": 0.4117647058823529, + "acc,exam_id__USP_2018": 0.3888888888888889, + "acc,exam_id__USP_2021": 0.3076923076923077, + "acc,exam_id__USP_2019": 0.45, + "acc,exam_id__USP_2022": 0.5306122448979592, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.5374387683694891, + "acc,exam_id__2011": 0.6239316239316239, + "acc,exam_id__2017": 0.5172413793103449, + "acc,exam_id__2015": 0.5042016806722689, + 
"acc,exam_id__2016": 0.4628099173553719, + "acc,exam_id__2016_2": 0.5609756097560976, + "acc,exam_id__2009": 0.5565217391304348, + "acc,exam_id__2012": 0.5603448275862069, + "acc,exam_id__2010": 0.5555555555555556, + "acc,exam_id__2013": 0.5833333333333334, + "acc,exam_id__2014": 0.5688073394495413, + "acc,exam_id__2022": 0.48120300751879697, + "acc,exam_id__2023": 0.4962962962962963 + }, + "faquad_nli": { + "f1_macro,all": 0.7380971253434792, + "acc,all": 0.8123076923076923, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8330124462550406, + "acc,all": 0.8335714285714285 + }, + "oab_exams": { + "acc,all": 0.3826879271070615, + "acc,exam_id__2011-03": 0.36363636363636365, + "acc,exam_id__2014-13": 0.35, + "acc,exam_id__2013-10": 0.375, + "acc,exam_id__2017-24": 0.325, + "acc,exam_id__2017-22": 0.425, + "acc,exam_id__2012-06a": 0.3625, + "acc,exam_id__2016-20a": 0.2625, + "acc,exam_id__2012-09": 0.37662337662337664, + "acc,exam_id__2015-16": 0.3625, + "acc,exam_id__2011-04": 0.3375, + "acc,exam_id__2012-07": 0.3, + "acc,exam_id__2014-14": 0.3875, + "acc,exam_id__2014-15": 0.5384615384615384, + "acc,exam_id__2010-02": 0.41, + "acc,exam_id__2015-18": 0.4, + "acc,exam_id__2016-19": 0.5128205128205128, + "acc,exam_id__2012-06": 0.3875, + "acc,exam_id__2013-12": 0.4125, + "acc,exam_id__2011-05": 0.3875, + "acc,exam_id__2017-23": 0.4375, + "acc,exam_id__2013-11": 0.3875, + "acc,exam_id__2016-20": 0.375, + "acc,exam_id__2016-21": 0.35, + "acc,exam_id__2018-25": 0.375, + "acc,exam_id__2010-01": 0.3411764705882353, + "acc,exam_id__2012-08": 0.3625, + "acc,exam_id__2015-17": 0.4358974358974359, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6748662787280315, + "acc,all": 0.6944770857814336 + }, + "tweetsentbr": { + "f1_macro,all": 0.5694315247968341, + "acc,all": 0.6477611940298508, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "8128029fba795c423004d08695fdda8491289748", - "model_dtype": "torch.float16", - "model_memory_footprint": 8185069568, - "model_num_parameters": 1136201728, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1646.7455065359477, - "min_seq_length": 1623, - "max_seq_length": 1713, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1855.7455065359477, - "min_seq_length": 1832, - "max_seq_length": 1922, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1792.9262865090404, - "min_seq_length": 1416, - "max_seq_length": 2593, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1693.039188243527, - "min_seq_length": 1427, - "max_seq_length": 2691, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1871.9876923076922, - "min_seq_length": 1816, - "max_seq_length": 1992, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "8128029fba795c423004d08695fdda8491289748", + "model_dtype": "torch.float16", + "model_memory_footprint": 8185069568, + "model_num_parameters": 1136201728, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1752.3878571428572, - "min_seq_length": 1729, - "max_seq_length": 2003, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1438.764464692483, - "min_seq_length": 1172, - "max_seq_length": 1941, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1646.7455065359477, + "min_seq_length": 1623, + "max_seq_length": 1713, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1855.7455065359477, + "min_seq_length": 1832, + "max_seq_length": 1922, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1792.9262865090404, + "min_seq_length": 1416, + "max_seq_length": 2593, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1693.039188243527, + "min_seq_length": 1427, + "max_seq_length": 
2691, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1871.9876923076922, + "min_seq_length": 1816, + "max_seq_length": 1992, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1752.3878571428572, + "min_seq_length": 1729, + "max_seq_length": 2003, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1438.764464692483, + "min_seq_length": 1172, + "max_seq_length": 1941, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2253.3360752056406, + "min_seq_length": 2218, + "max_seq_length": 2292, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1999.2492537313433, + "min_seq_length": 1978, + "max_seq_length": 2094, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2253.3360752056406, - "min_seq_length": 2218, - "max_seq_length": 2292, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=TheBloke/zephyr-7B-beta-GPTQ,autogptq=True,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1999.2492537313433, - "min_seq_length": 1978, - "max_seq_length": 2094, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=TheBloke/zephyr-7B-beta-GPTQ,autogptq=True,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - 
"bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/TheBloke/zephyr-7B-beta-GPTQ/results_2024-05-29T07-56-34.401974.json b/TheBloke/zephyr-7B-beta-GPTQ/results_2024-05-29T07-56-34.401974.json index a448d3ec3b2ea8d806ce26a19fae6c9a5be66e6f..39bc9e4aaa3c941ebd4b9ea8afa8741c7ddcd353 100644 --- a/TheBloke/zephyr-7B-beta-GPTQ/results_2024-05-29T07-56-34.401974.json +++ b/TheBloke/zephyr-7B-beta-GPTQ/results_2024-05-29T07-56-34.401974.json @@ -34,29 +34,29 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.592214919375095, - "all_grouped_npm": 0.3858178386057269, + "all_grouped_average": 0.6404309894980496, + "all_grouped_npm": 0.4741529035982419, "all_grouped": { "enem_challenge": 0.5374387683694891, "bluex": 0.46870653685674546, "oab_exams": 0.3826879271070615, - "assin2_rte": 0.5831734998147651, + "assin2_rte": 0.8747602497221476, "assin2_sts": 0.6848780483036166, "faquad_nli": 0.7380971253434792, "hatebr_offensive": 0.8330124462550406, "portuguese_hate_speech": 0.6748662787280315, - "tweetsentbr": 0.4270736435976256 + "tweetsentbr": 0.5694315247968341 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.5374387683694891, "harness|bluex|bluex|None|3": 0.46870653685674546, "harness|oab_exams|oab_exams|None|3": 0.3826879271070615, - "harness|assin2_rte|assin2_rte|None|15": 0.5831734998147651, + "harness|assin2_rte|assin2_rte|None|15": 0.8747602497221476, "harness|assin2_sts|assin2_sts|None|15": 0.6848780483036166, "harness|faquad_nli|faquad_nli|None|15": 0.7380971253434792, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8330124462550406, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6748662787280315, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4270736435976256 + "harness|tweetsentbr|tweetsentbr|None|25": 0.5694315247968341 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.5374387683694891, @@ -125,9 +125,9 @@ "main_score": 0.3826879271070615 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.5831734998147651, + "f1_macro,all": 0.8747602497221476, "acc,all": 0.875, - "main_score": 0.5831734998147651 + "main_score": 0.8747602497221476 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.6848780483036166, @@ -150,9 +150,9 @@ "main_score": 0.6748662787280315 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4270736435976256, + "f1_macro,all": 0.5694315247968341, "acc,all": 0.6477611940298508, - "main_score": 0.4270736435976256 + "main_score": 0.5694315247968341 } }, "config_tasks": { diff --git a/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T/raw_2024-02-17T14-58-12.044782/results.json b/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T/raw_2024-02-17T14-58-12.044782/results.json index 043883cf58d0dab80fe1ad273bc3a834d3b8d4bc..47a1dddc4209febec7a276842f822c7fdb2d378f 100644 --- a/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T/raw_2024-02-17T14-58-12.044782/results.json +++ b/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T/raw_2024-02-17T14-58-12.044782/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.589309397637424, - "acc,all": 0.5894607843137255, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.13574937116211067, - "mse,all": 2.428954248366013, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.22809457579972184, - "acc,exam_id__UNICAMP_2020": 0.2727272727272727, - "acc,exam_id__UNICAMP_2021_2": 0.2549019607843137, 
- "acc,exam_id__USP_2018": 0.2222222222222222, - "acc,exam_id__UNICAMP_2023": 0.27906976744186046, - "acc,exam_id__USP_2024": 0.21951219512195122, - "acc,exam_id__UNICAMP_2021_1": 0.21739130434782608, - "acc,exam_id__USP_2020": 0.14285714285714285, - "acc,exam_id__USP_2019": 0.2, - "acc,exam_id__USP_2023": 0.11363636363636363, - "acc,exam_id__UNICAMP_2019": 0.28, - "acc,exam_id__USP_2022": 0.14285714285714285, - "acc,exam_id__UNICAMP_2024": 0.2222222222222222, - "acc,exam_id__USP_2021": 0.23076923076923078, - "acc,exam_id__UNICAMP_2022": 0.3076923076923077, - "acc,exam_id__UNICAMP_2018": 0.3148148148148148, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.2225332400279916, - "acc,exam_id__2023": 0.2, - "acc,exam_id__2010": 0.24786324786324787, - "acc,exam_id__2016_2": 0.1951219512195122, - "acc,exam_id__2011": 0.18803418803418803, - "acc,exam_id__2022": 0.18796992481203006, - "acc,exam_id__2015": 0.20168067226890757, - "acc,exam_id__2013": 0.24074074074074073, - "acc,exam_id__2014": 0.28440366972477066, - "acc,exam_id__2009": 0.2608695652173913, - "acc,exam_id__2016": 0.2231404958677686, - "acc,exam_id__2012": 0.19827586206896552, - "acc,exam_id__2017": 0.25862068965517243 - }, - "faquad_nli": { - "f1_macro,all": 0.4396551724137931, - "acc,all": 0.7846153846153846, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.369192959530931, - "acc,all": 0.5064285714285715 - }, - "oab_exams": { - "acc,all": 0.23644646924829157, - "acc,exam_id__2016-20": 0.25, - "acc,exam_id__2015-17": 0.2564102564102564, - "acc,exam_id__2014-13": 0.1875, - "acc,exam_id__2018-25": 0.325, - "acc,exam_id__2017-22": 0.275, - "acc,exam_id__2014-14": 0.1875, - "acc,exam_id__2013-11": 0.2375, - "acc,exam_id__2015-18": 0.225, - "acc,exam_id__2011-05": 0.2125, - "acc,exam_id__2012-08": 0.2625, - "acc,exam_id__2012-06a": 0.2125, - "acc,exam_id__2017-24": 0.175, - "acc,exam_id__2012-07": 0.1875, - "acc,exam_id__2016-20a": 0.275, - "acc,exam_id__2016-21": 0.225, - "acc,exam_id__2016-19": 0.28205128205128205, - "acc,exam_id__2010-01": 0.29411764705882354, - "acc,exam_id__2011-04": 0.3125, - "acc,exam_id__2012-06": 0.1625, - "acc,exam_id__2017-23": 0.2375, - "acc,exam_id__2012-09": 0.19480519480519481, - "acc,exam_id__2013-12": 0.2375, - "acc,exam_id__2013-10": 0.2, - "acc,exam_id__2011-03": 0.2222222222222222, - "acc,exam_id__2015-16": 0.25, - "acc,exam_id__2010-02": 0.27, - "acc,exam_id__2014-15": 0.21794871794871795, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.3639419831744621, - "acc,all": 0.618096357226792 - }, - "tweetsentbr": { - "f1_macro,all": 0.31998368175826974, - "acc,all": 0.427363184079602, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.589309397637424, + "acc,all": 0.5894607843137255, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.13574937116211067, + "mse,all": 2.428954248366013, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.22809457579972184, + "acc,exam_id__UNICAMP_2020": 0.2727272727272727, + "acc,exam_id__UNICAMP_2021_2": 0.2549019607843137, + "acc,exam_id__USP_2018": 0.2222222222222222, + "acc,exam_id__UNICAMP_2023": 0.27906976744186046, + "acc,exam_id__USP_2024": 0.21951219512195122, + "acc,exam_id__UNICAMP_2021_1": 0.21739130434782608, + "acc,exam_id__USP_2020": 0.14285714285714285, + "acc,exam_id__USP_2019": 0.2, + "acc,exam_id__USP_2023": 0.11363636363636363, + "acc,exam_id__UNICAMP_2019": 0.28, + "acc,exam_id__USP_2022": 0.14285714285714285, + "acc,exam_id__UNICAMP_2024": 0.2222222222222222, + "acc,exam_id__USP_2021": 0.23076923076923078, + "acc,exam_id__UNICAMP_2022": 0.3076923076923077, + "acc,exam_id__UNICAMP_2018": 0.3148148148148148, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.2225332400279916, + "acc,exam_id__2023": 0.2, + "acc,exam_id__2010": 0.24786324786324787, + "acc,exam_id__2016_2": 0.1951219512195122, + "acc,exam_id__2011": 0.18803418803418803, + "acc,exam_id__2022": 0.18796992481203006, + "acc,exam_id__2015": 0.20168067226890757, + "acc,exam_id__2013": 0.24074074074074073, + "acc,exam_id__2014": 0.28440366972477066, + "acc,exam_id__2009": 0.2608695652173913, + "acc,exam_id__2016": 0.2231404958677686, + "acc,exam_id__2012": 0.19827586206896552, + "acc,exam_id__2017": 0.25862068965517243 + }, + "faquad_nli": { + "f1_macro,all": 0.4396551724137931, + "acc,all": 0.7846153846153846, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.369192959530931, + "acc,all": 0.5064285714285715 + }, + "oab_exams": { + "acc,all": 0.23644646924829157, + "acc,exam_id__2016-20": 0.25, + "acc,exam_id__2015-17": 0.2564102564102564, + "acc,exam_id__2014-13": 0.1875, + "acc,exam_id__2018-25": 0.325, + "acc,exam_id__2017-22": 0.275, + "acc,exam_id__2014-14": 0.1875, + "acc,exam_id__2013-11": 0.2375, + "acc,exam_id__2015-18": 0.225, + "acc,exam_id__2011-05": 0.2125, + "acc,exam_id__2012-08": 0.2625, + "acc,exam_id__2012-06a": 0.2125, + "acc,exam_id__2017-24": 0.175, + "acc,exam_id__2012-07": 0.1875, + "acc,exam_id__2016-20a": 0.275, + "acc,exam_id__2016-21": 0.225, + "acc,exam_id__2016-19": 0.28205128205128205, + "acc,exam_id__2010-01": 0.29411764705882354, + "acc,exam_id__2011-04": 0.3125, + "acc,exam_id__2012-06": 0.1625, + "acc,exam_id__2017-23": 0.2375, + "acc,exam_id__2012-09": 0.19480519480519481, + 
"acc,exam_id__2013-12": 0.2375, + "acc,exam_id__2013-10": 0.2, + "acc,exam_id__2011-03": 0.2222222222222222, + "acc,exam_id__2015-16": 0.25, + "acc,exam_id__2010-02": 0.27, + "acc,exam_id__2014-15": 0.21794871794871795, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.5459129747616931, + "acc,all": 0.618096357226792 + }, + "tweetsentbr": { + "f1_macro,all": 0.31998368175826974, + "acc,all": 0.427363184079602, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 47, - "non_truncated": 14103, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 62, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "036fa4651240b9a1487f709833b9e4b96b4c1574", 
- "model_dtype": "torch.float16", - "model_memory_footprint": 2211633920, - "model_num_parameters": 1100048384, - "model_is_loaded_in_4bit": false, - "model_is_loaded_in_8bit": false, - "model_is_quantized": null, - "model_device": "cuda:1", - "batch_size": 64, - "max_length": 2048, - "max_ctx_length": 2016, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1303.9889705882354, - "min_seq_length": 1281, - "max_seq_length": 1370, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1527.9889705882354, - "min_seq_length": 1505, - "max_seq_length": 1594, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 34, - "non_truncated": 685, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 47, - "mean_seq_length": 1668.7426981919332, - "min_seq_length": 1302, - "max_seq_length": 2428, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.934631432545202 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 13, - "non_truncated": 1416, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 15, - "mean_seq_length": 1547.9881035689293, - "min_seq_length": 1295, - "max_seq_length": 2587, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9895031490552832 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1511.1184615384616, - "min_seq_length": 1459, - "max_seq_length": 1618, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1268.9178571428572, - "min_seq_length": 1245, - "max_seq_length": 1515, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 47, + "non_truncated": 14103, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 62, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "036fa4651240b9a1487f709833b9e4b96b4c1574", + "model_dtype": "torch.float16", + "model_memory_footprint": 2211633920, + "model_num_parameters": 1100048384, + "model_is_loaded_in_4bit": false, + "model_is_loaded_in_8bit": false, + "model_is_quantized": null, + "model_device": 
"cuda:1", + "batch_size": 64, + "max_length": 2048, + "max_ctx_length": 2016, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1320.4145785876992, - "min_seq_length": 1065, - "max_seq_length": 1802, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1303.9889705882354, + "min_seq_length": 1281, + "max_seq_length": 1370, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1527.9889705882354, + "min_seq_length": 1505, + "max_seq_length": 1594, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 34, + "non_truncated": 685, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 47, + "mean_seq_length": 1668.7426981919332, + "min_seq_length": 1302, + "max_seq_length": 2428, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.934631432545202 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 13, + "non_truncated": 1416, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 15, + "mean_seq_length": 1547.9881035689293, + "min_seq_length": 1295, + "max_seq_length": 2587, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9895031490552832 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1511.1184615384616, + "min_seq_length": 1459, + "max_seq_length": 1618, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1268.9178571428572, + "min_seq_length": 1245, + "max_seq_length": 1515, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1320.4145785876992, + "min_seq_length": 1065, + "max_seq_length": 1802, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1760.801410105758, + "min_seq_length": 1726, + "max_seq_length": 1804, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + 
"non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1527.6845771144278, + "min_seq_length": 1506, + "max_seq_length": 1645, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1760.801410105758, - "min_seq_length": 1726, - "max_seq_length": 1804, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T,dtype=float16,device=cuda:1,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1527.6845771144278, - "min_seq_length": 1506, - "max_seq_length": 1645, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T,dtype=float16,device=cuda:1,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "804df15" + "git_hash": "804df15" } \ No newline at end of file diff --git a/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T/results_2024-02-17T14-58-12.044782.json b/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T/results_2024-02-17T14-58-12.044782.json index f1d83f6dd751977e28e22659096812c04d7a99c0..5ae07a00b9a0f9234451f2ce5409e95c2b41e183 100644 --- a/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T/results_2024-02-17T14-58-12.044782.json +++ b/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T/results_2024-02-17T14-58-12.044782.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.32276742786144397, - "all_grouped_npm": -0.0217747938879217, + "all_grouped_average": 0.3429864269266918, + "all_grouped_npm": 0.01703326573827386, "all_grouped": { "enem_challenge": 0.2225332400279916, "bluex": 0.22809457579972184, @@ -44,7 +44,7 @@ "assin2_sts": 0.13574937116211067, "faquad_nli": 0.4396551724137931, "hatebr_offensive": 0.369192959530931, - "portuguese_hate_speech": 0.3639419831744621, + "portuguese_hate_speech": 0.5459129747616931, "tweetsentbr": 0.31998368175826974 }, "all": { @@ -55,7 +55,7 @@ "harness|assin2_sts|assin2_sts|None|15": 0.13574937116211067, "harness|faquad_nli|faquad_nli|None|15": 0.4396551724137931, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.369192959530931, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.3639419831744621, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.5459129747616931, "harness|tweetsentbr|tweetsentbr|None|25": 0.31998368175826974 }, 
"harness|enem_challenge|enem_challenge|None|3": { @@ -145,9 +145,9 @@ "main_score": 0.369192959530931 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.3639419831744621, + "f1_macro,all": 0.5459129747616931, "acc,all": 0.618096357226792, - "main_score": 0.3639419831744621 + "main_score": 0.5459129747616931 }, "harness|tweetsentbr|tweetsentbr|None|25": { "f1_macro,all": 0.31998368175826974, diff --git a/UCLA-AGI/Llama-3-Instruct-8B-SPPO-Iter2/raw_2024-08-09T03-18-44.156930/results.json b/UCLA-AGI/Llama-3-Instruct-8B-SPPO-Iter2/raw_2024-08-09T03-18-44.156930/results.json index 58f63b1dc57311abd152f4c3e70d5244ac779138..a8ab43ea9eb93db8245725db854f6ed979cb1c19 100644 --- a/UCLA-AGI/Llama-3-Instruct-8B-SPPO-Iter2/raw_2024-08-09T03-18-44.156930/results.json +++ b/UCLA-AGI/Llama-3-Instruct-8B-SPPO-Iter2/raw_2024-08-09T03-18-44.156930/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9091166324509491, - "acc,all": 0.9093137254901961, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7141518081879549, - "mse,all": 0.8912990196078432, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5771905424200278, - "acc,exam_id__USP_2021": 0.6153846153846154, - "acc,exam_id__USP_2019": 0.55, - "acc,exam_id__USP_2022": 0.6122448979591837, - "acc,exam_id__USP_2024": 0.6341463414634146, - "acc,exam_id__UNICAMP_2019": 0.56, - "acc,exam_id__USP_2018": 0.46296296296296297, - "acc,exam_id__UNICAMP_2024": 0.6, - "acc,exam_id__UNICAMP_2023": 0.627906976744186, - "acc,exam_id__USP_2023": 0.6590909090909091, - "acc,exam_id__UNICAMP_2020": 0.5818181818181818, - "acc,exam_id__UNICAMP_2018": 0.5, - "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, - "acc,exam_id__USP_2020": 0.5892857142857143, - "acc,exam_id__UNICAMP_2022": 0.6410256410256411, - "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.701889433170049, - "acc,exam_id__2016_2": 0.6991869918699187, - "acc,exam_id__2015": 0.7058823529411765, - "acc,exam_id__2011": 0.7264957264957265, - "acc,exam_id__2022": 0.6466165413533834, - "acc,exam_id__2013": 0.6851851851851852, - "acc,exam_id__2016": 0.6694214876033058, - "acc,exam_id__2014": 0.7064220183486238, - "acc,exam_id__2017": 0.6637931034482759, - "acc,exam_id__2009": 0.7130434782608696, - "acc,exam_id__2010": 0.7264957264957265, - "acc,exam_id__2012": 0.7155172413793104, - "acc,exam_id__2023": 0.762962962962963 - }, - "faquad_nli": { - "f1_macro,all": 0.6749819434413022, - "acc,all": 0.7092307692307692, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8639647822209131, - "acc,all": 0.8642857142857143 - }, - "oab_exams": { - "acc,all": 0.5006833712984055, - "acc,exam_id__2015-17": 0.6282051282051282, - "acc,exam_id__2013-12": 0.5625, - "acc,exam_id__2015-18": 0.5125, - "acc,exam_id__2011-04": 0.525, - "acc,exam_id__2016-21": 0.4, - "acc,exam_id__2012-06": 0.5375, - "acc,exam_id__2011-03": 0.41414141414141414, - "acc,exam_id__2016-19": 0.5641025641025641, - "acc,exam_id__2012-07": 0.5, - "acc,exam_id__2012-06a": 0.6, - "acc,exam_id__2017-23": 0.4625, - "acc,exam_id__2015-16": 0.5, - "acc,exam_id__2014-13": 0.4375, - "acc,exam_id__2012-09": 0.44155844155844154, - "acc,exam_id__2016-20a": 0.4125, - "acc,exam_id__2017-22": 0.5875, - "acc,exam_id__2017-24": 0.4375, - "acc,exam_id__2014-15": 0.6025641025641025, - "acc,exam_id__2018-25": 0.4875, - "acc,exam_id__2016-20": 0.525, - 
"acc,exam_id__2012-08": 0.5, - "acc,exam_id__2013-10": 0.4875, - "acc,exam_id__2013-11": 0.525, - "acc,exam_id__2011-05": 0.425, - "acc,exam_id__2010-02": 0.51, - "acc,exam_id__2010-01": 0.3764705882352941, - "acc,exam_id__2014-14": 0.5875, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6639551413678724, - "acc,all": 0.6709753231492362 - }, - "tweetsentbr": { - "f1_macro,all": 0.4799299921041461, - "acc,all": 0.7089552238805971, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9091166324509491, + "acc,all": 0.9093137254901961, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7141518081879549, + "mse,all": 0.8912990196078432, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5771905424200278, + "acc,exam_id__USP_2021": 0.6153846153846154, + "acc,exam_id__USP_2019": 0.55, + "acc,exam_id__USP_2022": 0.6122448979591837, + "acc,exam_id__USP_2024": 0.6341463414634146, + "acc,exam_id__UNICAMP_2019": 0.56, + "acc,exam_id__USP_2018": 0.46296296296296297, + "acc,exam_id__UNICAMP_2024": 0.6, + "acc,exam_id__UNICAMP_2023": 0.627906976744186, + "acc,exam_id__USP_2023": 0.6590909090909091, + "acc,exam_id__UNICAMP_2020": 0.5818181818181818, + "acc,exam_id__UNICAMP_2018": 0.5, + "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, + "acc,exam_id__USP_2020": 0.5892857142857143, + "acc,exam_id__UNICAMP_2022": 0.6410256410256411, + "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.701889433170049, + "acc,exam_id__2016_2": 0.6991869918699187, + "acc,exam_id__2015": 0.7058823529411765, + "acc,exam_id__2011": 0.7264957264957265, + "acc,exam_id__2022": 0.6466165413533834, + "acc,exam_id__2013": 0.6851851851851852, + "acc,exam_id__2016": 0.6694214876033058, + "acc,exam_id__2014": 0.7064220183486238, + "acc,exam_id__2017": 0.6637931034482759, + "acc,exam_id__2009": 0.7130434782608696, + "acc,exam_id__2010": 0.7264957264957265, + "acc,exam_id__2012": 0.7155172413793104, + "acc,exam_id__2023": 0.762962962962963 + }, + "faquad_nli": { + "f1_macro,all": 0.6749819434413022, + "acc,all": 0.7092307692307692, + "alias": 
"faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8639647822209131, + "acc,all": 0.8642857142857143 + }, + "oab_exams": { + "acc,all": 0.5006833712984055, + "acc,exam_id__2015-17": 0.6282051282051282, + "acc,exam_id__2013-12": 0.5625, + "acc,exam_id__2015-18": 0.5125, + "acc,exam_id__2011-04": 0.525, + "acc,exam_id__2016-21": 0.4, + "acc,exam_id__2012-06": 0.5375, + "acc,exam_id__2011-03": 0.41414141414141414, + "acc,exam_id__2016-19": 0.5641025641025641, + "acc,exam_id__2012-07": 0.5, + "acc,exam_id__2012-06a": 0.6, + "acc,exam_id__2017-23": 0.4625, + "acc,exam_id__2015-16": 0.5, + "acc,exam_id__2014-13": 0.4375, + "acc,exam_id__2012-09": 0.44155844155844154, + "acc,exam_id__2016-20a": 0.4125, + "acc,exam_id__2017-22": 0.5875, + "acc,exam_id__2017-24": 0.4375, + "acc,exam_id__2014-15": 0.6025641025641025, + "acc,exam_id__2018-25": 0.4875, + "acc,exam_id__2016-20": 0.525, + "acc,exam_id__2012-08": 0.5, + "acc,exam_id__2013-10": 0.4875, + "acc,exam_id__2013-11": 0.525, + "acc,exam_id__2011-05": 0.425, + "acc,exam_id__2010-02": 0.51, + "acc,exam_id__2010-01": 0.3764705882352941, + "acc,exam_id__2014-14": 0.5875, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6639551413678724, + "acc,all": 0.6709753231492362 + }, + "tweetsentbr": { + "f1_macro,all": 0.6399066561388616, + "acc,all": 0.7089552238805971, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "730c7207d4b538feeb3c2e6d6f6a6ba8615a9be3", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 16060530944, - "model_num_parameters": 8030261248, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1318.5322712418301, - "min_seq_length": 1299, - "max_seq_length": 1382, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1509.5322712418301, - "min_seq_length": 1490, - "max_seq_length": 1573, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1484.7719054242002, - "min_seq_length": 1165, - "max_seq_length": 2134, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1412.3547935619315, - "min_seq_length": 1187, - "max_seq_length": 2340, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1447.8215384615385, - "min_seq_length": 1402, - "max_seq_length": 1544, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "730c7207d4b538feeb3c2e6d6f6a6ba8615a9be3", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 16060530944, + "model_num_parameters": 8030261248, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1279.3878571428572, - "min_seq_length": 1259, - "max_seq_length": 1498, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1220.3772209567198, - "min_seq_length": 988, - "max_seq_length": 1654, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1318.5322712418301, + "min_seq_length": 1299, + "max_seq_length": 1382, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1509.5322712418301, + "min_seq_length": 1490, + "max_seq_length": 1573, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1484.7719054242002, + "min_seq_length": 1165, + "max_seq_length": 2134, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1412.3547935619315, + "min_seq_length": 1187, + "max_seq_length": 2340, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1447.8215384615385, + "min_seq_length": 1402, + "max_seq_length": 1544, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1279.3878571428572, + "min_seq_length": 1259, + "max_seq_length": 1498, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1220.3772209567198, + "min_seq_length": 988, + "max_seq_length": 1654, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1676.4195064629848, + "min_seq_length": 1646, + "max_seq_length": 1708, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1537.1537313432837, + "min_seq_length": 1520, + "max_seq_length": 1585, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1676.4195064629848, - "min_seq_length": 1646, - "max_seq_length": 1708, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=UCLA-AGI/Llama-3-Instruct-8B-SPPO-Iter2,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1537.1537313432837, - "min_seq_length": 1520, - "max_seq_length": 1585, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=UCLA-AGI/Llama-3-Instruct-8B-SPPO-Iter2,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": 
null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/UCLA-AGI/Llama-3-Instruct-8B-SPPO-Iter2/results_2024-08-09T03-18-44.156930.json b/UCLA-AGI/Llama-3-Instruct-8B-SPPO-Iter2/results_2024-08-09T03-18-44.156930.json index b0f6d4291e6e8cbb3be3e9dff9991d54f5958559..7c21c339e443e6c03e8a5c7b739c309cce45fd6c 100644 --- a/UCLA-AGI/Llama-3-Instruct-8B-SPPO-Iter2/results_2024-08-09T03-18-44.156930.json +++ b/UCLA-AGI/Llama-3-Instruct-8B-SPPO-Iter2/results_2024-08-09T03-18-44.156930.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6762070718512911, - "all_grouped_npm": 0.5177763599949778, + "all_grouped_average": 0.6939822567440372, + "all_grouped_npm": 0.5442275279901359, "all_grouped": { "enem_challenge": 0.701889433170049, "bluex": 0.5771905424200278, @@ -45,7 +45,7 @@ "faquad_nli": 0.6749819434413022, "hatebr_offensive": 0.8639647822209131, "portuguese_hate_speech": 0.6639551413678724, - "tweetsentbr": 0.4799299921041461 + "tweetsentbr": 0.6399066561388616 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.701889433170049, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.6749819434413022, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8639647822209131, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6639551413678724, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4799299921041461 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6399066561388616 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.701889433170049, @@ -150,9 +150,9 @@ "main_score": 0.6639551413678724 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4799299921041461, + "f1_macro,all": 0.6399066561388616, "acc,all": 0.7089552238805971, - "main_score": 0.4799299921041461 + "main_score": 0.6399066561388616 } }, "config_tasks": { diff --git a/UCLA-AGI/Llama-3-Instruct-8B-SPPO-Iter3/raw_2024-08-09T04-19-33.155639/results.json b/UCLA-AGI/Llama-3-Instruct-8B-SPPO-Iter3/raw_2024-08-09T04-19-33.155639/results.json index b41cefb1d293fd5d6355e239058eb626e6a35475..4466d2e900335344df3f3b2238fa63046d4d3d9e 100644 --- a/UCLA-AGI/Llama-3-Instruct-8B-SPPO-Iter3/raw_2024-08-09T04-19-33.155639/results.json +++ b/UCLA-AGI/Llama-3-Instruct-8B-SPPO-Iter3/raw_2024-08-09T04-19-33.155639/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9112109957323393, - "acc,all": 0.9113562091503268, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7038791413087024, - "mse,all": 1.0363567956862745, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5799721835883171, - "acc,exam_id__UNICAMP_2023": 0.6744186046511628, - "acc,exam_id__UNICAMP_2022": 0.5897435897435898, - "acc,exam_id__USP_2018": 0.48148148148148145, - "acc,exam_id__UNICAMP_2020": 0.6, - "acc,exam_id__USP_2020": 0.5714285714285714, - "acc,exam_id__UNICAMP_2024": 0.6, - "acc,exam_id__UNICAMP_2018": 0.5185185185185185, - "acc,exam_id__USP_2019": 0.6, - "acc,exam_id__USP_2023": 0.6590909090909091, - "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, - "acc,exam_id__USP_2021": 0.5769230769230769, - "acc,exam_id__USP_2024": 0.6341463414634146, - "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, - "acc,exam_id__USP_2022": 0.5918367346938775, - "acc,exam_id__UNICAMP_2019": 0.58, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6990902729181245, - "acc,exam_id__2013": 0.6851851851851852, - "acc,exam_id__2010": 0.717948717948718, - "acc,exam_id__2015": 
0.7226890756302521, - "acc,exam_id__2016": 0.6776859504132231, - "acc,exam_id__2017": 0.6724137931034483, - "acc,exam_id__2012": 0.6896551724137931, - "acc,exam_id__2023": 0.7703703703703704, - "acc,exam_id__2022": 0.6466165413533834, - "acc,exam_id__2016_2": 0.6910569105691057, - "acc,exam_id__2014": 0.6972477064220184, - "acc,exam_id__2009": 0.6956521739130435, - "acc,exam_id__2011": 0.717948717948718 - }, - "faquad_nli": { - "f1_macro,all": 0.6519970018203234, - "acc,all": 0.6846153846153846, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8558459980359402, - "acc,all": 0.8564285714285714 - }, - "oab_exams": { - "acc,all": 0.5002277904328019, - "acc,exam_id__2011-04": 0.4875, - "acc,exam_id__2015-18": 0.5, - "acc,exam_id__2015-17": 0.6282051282051282, - "acc,exam_id__2016-21": 0.375, - "acc,exam_id__2014-13": 0.45, - "acc,exam_id__2017-22": 0.6, - "acc,exam_id__2010-02": 0.49, - "acc,exam_id__2012-08": 0.5125, - "acc,exam_id__2011-05": 0.4375, - "acc,exam_id__2014-14": 0.6125, - "acc,exam_id__2017-24": 0.45, - "acc,exam_id__2012-07": 0.5, - "acc,exam_id__2012-09": 0.4675324675324675, - "acc,exam_id__2010-01": 0.38823529411764707, - "acc,exam_id__2012-06": 0.525, - "acc,exam_id__2013-10": 0.5, - "acc,exam_id__2016-19": 0.5769230769230769, - "acc,exam_id__2017-23": 0.4625, - "acc,exam_id__2014-15": 0.5769230769230769, - "acc,exam_id__2016-20a": 0.4, - "acc,exam_id__2015-16": 0.5125, - "acc,exam_id__2013-12": 0.55, - "acc,exam_id__2011-03": 0.40404040404040403, - "acc,exam_id__2013-11": 0.525, - "acc,exam_id__2018-25": 0.475, - "acc,exam_id__2016-20": 0.5125, - "acc,exam_id__2012-06a": 0.625, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.681933183830717, - "acc,all": 0.6944770857814336 - }, - "tweetsentbr": { - "f1_macro,all": 0.47901037868352103, - "acc,all": 0.708457711442786, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9112109957323393, + "acc,all": 0.9113562091503268, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7038791413087024, + "mse,all": 1.0363567956862745, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5799721835883171, + "acc,exam_id__UNICAMP_2023": 0.6744186046511628, + "acc,exam_id__UNICAMP_2022": 0.5897435897435898, + "acc,exam_id__USP_2018": 0.48148148148148145, + "acc,exam_id__UNICAMP_2020": 0.6, + "acc,exam_id__USP_2020": 0.5714285714285714, + "acc,exam_id__UNICAMP_2024": 0.6, + "acc,exam_id__UNICAMP_2018": 0.5185185185185185, + "acc,exam_id__USP_2019": 0.6, + "acc,exam_id__USP_2023": 0.6590909090909091, + "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, + "acc,exam_id__USP_2021": 0.5769230769230769, + "acc,exam_id__USP_2024": 0.6341463414634146, + "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, + "acc,exam_id__USP_2022": 0.5918367346938775, + "acc,exam_id__UNICAMP_2019": 0.58, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6990902729181245, + "acc,exam_id__2013": 0.6851851851851852, + "acc,exam_id__2010": 0.717948717948718, + "acc,exam_id__2015": 0.7226890756302521, + "acc,exam_id__2016": 0.6776859504132231, + "acc,exam_id__2017": 0.6724137931034483, + "acc,exam_id__2012": 0.6896551724137931, + "acc,exam_id__2023": 0.7703703703703704, + "acc,exam_id__2022": 0.6466165413533834, + "acc,exam_id__2016_2": 0.6910569105691057, + "acc,exam_id__2014": 0.6972477064220184, + "acc,exam_id__2009": 0.6956521739130435, + "acc,exam_id__2011": 0.717948717948718 + }, + "faquad_nli": { + "f1_macro,all": 0.6519970018203234, + "acc,all": 0.6846153846153846, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8558459980359402, + "acc,all": 0.8564285714285714 + }, + "oab_exams": { + "acc,all": 0.5002277904328019, + "acc,exam_id__2011-04": 0.4875, + "acc,exam_id__2015-18": 0.5, + "acc,exam_id__2015-17": 0.6282051282051282, + "acc,exam_id__2016-21": 0.375, + "acc,exam_id__2014-13": 0.45, + "acc,exam_id__2017-22": 0.6, + "acc,exam_id__2010-02": 0.49, + "acc,exam_id__2012-08": 0.5125, + "acc,exam_id__2011-05": 0.4375, + "acc,exam_id__2014-14": 0.6125, + "acc,exam_id__2017-24": 0.45, + "acc,exam_id__2012-07": 0.5, + "acc,exam_id__2012-09": 0.4675324675324675, + "acc,exam_id__2010-01": 0.38823529411764707, + "acc,exam_id__2012-06": 0.525, + "acc,exam_id__2013-10": 0.5, + "acc,exam_id__2016-19": 0.5769230769230769, + "acc,exam_id__2017-23": 0.4625, + "acc,exam_id__2014-15": 0.5769230769230769, + "acc,exam_id__2016-20a": 0.4, + "acc,exam_id__2015-16": 0.5125, + "acc,exam_id__2013-12": 0.55, + 
"acc,exam_id__2011-03": 0.40404040404040403, + "acc,exam_id__2013-11": 0.525, + "acc,exam_id__2018-25": 0.475, + "acc,exam_id__2016-20": 0.5125, + "acc,exam_id__2012-06a": 0.625, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.681933183830717, + "acc,all": 0.6944770857814336 + }, + "tweetsentbr": { + "f1_macro,all": 0.6386805049113614, + "acc,all": 0.708457711442786, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "48c29bf2d9d68113255df9a47a9dabff6c67a13f", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 16060530944, - "model_num_parameters": 8030261248, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1318.5322712418301, - "min_seq_length": 1299, - "max_seq_length": 1382, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1509.5322712418301, - "min_seq_length": 1490, - "max_seq_length": 1573, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1484.7719054242002, - "min_seq_length": 1165, - "max_seq_length": 2134, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1412.3547935619315, - "min_seq_length": 1187, - "max_seq_length": 2340, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1447.8215384615385, - "min_seq_length": 1402, - "max_seq_length": 1544, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "48c29bf2d9d68113255df9a47a9dabff6c67a13f", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 16060530944, + "model_num_parameters": 8030261248, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1279.3878571428572, - "min_seq_length": 1259, - "max_seq_length": 1498, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1220.3772209567198, - "min_seq_length": 988, - "max_seq_length": 1654, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1318.5322712418301, + "min_seq_length": 1299, + "max_seq_length": 1382, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1509.5322712418301, + "min_seq_length": 1490, + "max_seq_length": 1573, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1484.7719054242002, + "min_seq_length": 1165, + "max_seq_length": 2134, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1412.3547935619315, + "min_seq_length": 1187, + "max_seq_length": 2340, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1447.8215384615385, + "min_seq_length": 1402, + "max_seq_length": 1544, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1279.3878571428572, + "min_seq_length": 1259, + "max_seq_length": 1498, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1220.3772209567198, + "min_seq_length": 988, + "max_seq_length": 1654, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1676.4195064629848, + "min_seq_length": 1646, + "max_seq_length": 1708, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1537.1537313432837, + "min_seq_length": 1520, + "max_seq_length": 1585, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1676.4195064629848, - "min_seq_length": 1646, - "max_seq_length": 1708, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=UCLA-AGI/Llama-3-Instruct-8B-SPPO-Iter3,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1537.1537313432837, - "min_seq_length": 1520, - "max_seq_length": 1585, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=UCLA-AGI/Llama-3-Instruct-8B-SPPO-Iter3,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": 
null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/UCLA-AGI/Llama-3-Instruct-8B-SPPO-Iter3/results_2024-08-09T04-19-33.155639.json b/UCLA-AGI/Llama-3-Instruct-8B-SPPO-Iter3/results_2024-08-09T04-19-33.155639.json index 339b9ef8effede51caa862d9ccdb2b9dd054a5d8..9d67308f8cee62dc21b68616e2f502e75380b3d7 100644 --- a/UCLA-AGI/Llama-3-Instruct-8B-SPPO-Iter3/results_2024-08-09T04-19-33.155639.json +++ b/UCLA-AGI/Llama-3-Instruct-8B-SPPO-Iter3/results_2024-08-09T04-19-33.155639.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6736852162611985, - "all_grouped_npm": 0.5142261278099851, + "all_grouped_average": 0.6914263413976252, + "all_grouped_npm": 0.5406266116439534, "all_grouped": { "enem_challenge": 0.6990902729181245, "bluex": 0.5799721835883171, @@ -45,7 +45,7 @@ "faquad_nli": 0.6519970018203234, "hatebr_offensive": 0.8558459980359402, "portuguese_hate_speech": 0.681933183830717, - "tweetsentbr": 0.47901037868352103 + "tweetsentbr": 0.6386805049113614 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6990902729181245, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.6519970018203234, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8558459980359402, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.681933183830717, - "harness|tweetsentbr|tweetsentbr|None|25": 0.47901037868352103 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6386805049113614 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6990902729181245, @@ -150,9 +150,9 @@ "main_score": 0.681933183830717 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.47901037868352103, + "f1_macro,all": 0.6386805049113614, "acc,all": 0.708457711442786, - "main_score": 0.47901037868352103 + "main_score": 0.6386805049113614 } }, "config_tasks": { diff --git a/UCLA-AGI/Mistral7B-PairRM-SPPO-Iter2/raw_2024-08-09T04-26-37.476674/results.json b/UCLA-AGI/Mistral7B-PairRM-SPPO-Iter2/raw_2024-08-09T04-26-37.476674/results.json index 9283538f886dda651ea14bf545d063e003c67841..e0e4859dd337057a5f1a25756029806e13012e57 100644 --- a/UCLA-AGI/Mistral7B-PairRM-SPPO-Iter2/raw_2024-08-09T04-26-37.476674/results.json +++ b/UCLA-AGI/Mistral7B-PairRM-SPPO-Iter2/raw_2024-08-09T04-26-37.476674/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9056240104288098, - "acc,all": 0.9056372549019608, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7150956617943389, - "mse,all": 1.1899427634803923, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5062586926286509, - "acc,exam_id__USP_2021": 0.5576923076923077, - "acc,exam_id__USP_2019": 0.4, - "acc,exam_id__USP_2022": 0.4489795918367347, - "acc,exam_id__USP_2024": 0.6829268292682927, - "acc,exam_id__UNICAMP_2019": 0.54, - "acc,exam_id__USP_2018": 0.42592592592592593, - "acc,exam_id__UNICAMP_2024": 0.4888888888888889, - "acc,exam_id__UNICAMP_2023": 0.4186046511627907, - "acc,exam_id__USP_2023": 0.6363636363636364, - "acc,exam_id__UNICAMP_2020": 0.509090909090909, - "acc,exam_id__UNICAMP_2018": 0.4074074074074074, - "acc,exam_id__UNICAMP_2021_1": 0.5869565217391305, - "acc,exam_id__USP_2020": 0.44642857142857145, - "acc,exam_id__UNICAMP_2022": 0.5128205128205128, - "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.5948215535339398, - "acc,exam_id__2016_2": 0.5853658536585366, - "acc,exam_id__2015": 0.5714285714285714, - 
"acc,exam_id__2011": 0.6752136752136753, - "acc,exam_id__2022": 0.5413533834586466, - "acc,exam_id__2013": 0.6018518518518519, - "acc,exam_id__2016": 0.5785123966942148, - "acc,exam_id__2014": 0.6330275229357798, - "acc,exam_id__2017": 0.5344827586206896, - "acc,exam_id__2009": 0.6173913043478261, - "acc,exam_id__2010": 0.5897435897435898, - "acc,exam_id__2012": 0.5948275862068966, - "acc,exam_id__2023": 0.6222222222222222 - }, - "faquad_nli": { - "f1_macro,all": 0.4366431726939579, - "acc,all": 0.6907692307692308, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7873748146886901, - "acc,all": 0.7935714285714286 - }, - "oab_exams": { - "acc,all": 0.379498861047836, - "acc,exam_id__2015-17": 0.6025641025641025, - "acc,exam_id__2013-12": 0.4, - "acc,exam_id__2015-18": 0.3375, - "acc,exam_id__2011-04": 0.3, - "acc,exam_id__2016-21": 0.4, - "acc,exam_id__2012-06": 0.375, - "acc,exam_id__2011-03": 0.3838383838383838, - "acc,exam_id__2016-19": 0.4358974358974359, - "acc,exam_id__2012-07": 0.3625, - "acc,exam_id__2012-06a": 0.425, - "acc,exam_id__2017-23": 0.35, - "acc,exam_id__2015-16": 0.325, - "acc,exam_id__2014-13": 0.2625, - "acc,exam_id__2012-09": 0.4155844155844156, - "acc,exam_id__2016-20a": 0.2625, - "acc,exam_id__2017-22": 0.525, - "acc,exam_id__2017-24": 0.3375, - "acc,exam_id__2014-15": 0.3974358974358974, - "acc,exam_id__2018-25": 0.3875, - "acc,exam_id__2016-20": 0.3125, - "acc,exam_id__2012-08": 0.425, - "acc,exam_id__2013-10": 0.35, - "acc,exam_id__2013-11": 0.3875, - "acc,exam_id__2011-05": 0.3375, - "acc,exam_id__2010-02": 0.4, - "acc,exam_id__2010-01": 0.3176470588235294, - "acc,exam_id__2014-14": 0.4375, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.452572111335143, - "acc,all": 0.7426556991774383 - }, - "tweetsentbr": { - "f1_macro,all": 0.5025244326214171, - "acc,all": 0.6985074626865672, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9056240104288098, + "acc,all": 0.9056372549019608, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7150956617943389, + "mse,all": 1.1899427634803923, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5062586926286509, + "acc,exam_id__USP_2021": 0.5576923076923077, + "acc,exam_id__USP_2019": 0.4, + "acc,exam_id__USP_2022": 0.4489795918367347, + "acc,exam_id__USP_2024": 0.6829268292682927, + "acc,exam_id__UNICAMP_2019": 0.54, + "acc,exam_id__USP_2018": 0.42592592592592593, + "acc,exam_id__UNICAMP_2024": 0.4888888888888889, + "acc,exam_id__UNICAMP_2023": 0.4186046511627907, + "acc,exam_id__USP_2023": 0.6363636363636364, + "acc,exam_id__UNICAMP_2020": 0.509090909090909, + "acc,exam_id__UNICAMP_2018": 0.4074074074074074, + "acc,exam_id__UNICAMP_2021_1": 0.5869565217391305, + "acc,exam_id__USP_2020": 0.44642857142857145, + "acc,exam_id__UNICAMP_2022": 0.5128205128205128, + "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.5948215535339398, + "acc,exam_id__2016_2": 0.5853658536585366, + "acc,exam_id__2015": 0.5714285714285714, + "acc,exam_id__2011": 0.6752136752136753, + "acc,exam_id__2022": 0.5413533834586466, + "acc,exam_id__2013": 0.6018518518518519, + "acc,exam_id__2016": 0.5785123966942148, + "acc,exam_id__2014": 0.6330275229357798, + "acc,exam_id__2017": 0.5344827586206896, + "acc,exam_id__2009": 0.6173913043478261, + "acc,exam_id__2010": 0.5897435897435898, + "acc,exam_id__2012": 0.5948275862068966, + "acc,exam_id__2023": 0.6222222222222222 + }, + "faquad_nli": { + "f1_macro,all": 0.6549647590409369, + "acc,all": 0.6907692307692308, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7873748146886901, + "acc,all": 0.7935714285714286 + }, + "oab_exams": { + "acc,all": 0.379498861047836, + "acc,exam_id__2015-17": 0.6025641025641025, + "acc,exam_id__2013-12": 0.4, + "acc,exam_id__2015-18": 0.3375, + "acc,exam_id__2011-04": 0.3, + "acc,exam_id__2016-21": 0.4, + "acc,exam_id__2012-06": 0.375, + "acc,exam_id__2011-03": 0.3838383838383838, + "acc,exam_id__2016-19": 0.4358974358974359, + "acc,exam_id__2012-07": 0.3625, + "acc,exam_id__2012-06a": 0.425, + "acc,exam_id__2017-23": 0.35, + "acc,exam_id__2015-16": 0.325, + "acc,exam_id__2014-13": 0.2625, + "acc,exam_id__2012-09": 0.4155844155844156, + "acc,exam_id__2016-20a": 0.2625, + "acc,exam_id__2017-22": 0.525, + "acc,exam_id__2017-24": 0.3375, + "acc,exam_id__2014-15": 0.3974358974358974, + "acc,exam_id__2018-25": 0.3875, + "acc,exam_id__2016-20": 0.3125, + "acc,exam_id__2012-08": 0.425, + 
"acc,exam_id__2013-10": 0.35, + "acc,exam_id__2013-11": 0.3875, + "acc,exam_id__2011-05": 0.3375, + "acc,exam_id__2010-02": 0.4, + "acc,exam_id__2010-01": 0.3176470588235294, + "acc,exam_id__2014-14": 0.4375, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6788581670027144, + "acc,all": 0.7426556991774383 + }, + "tweetsentbr": { + "f1_macro,all": 0.6700325768285561, + "acc,all": 0.6985074626865672, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 3, - "non_truncated": 14147, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 3, - "has_chat_template": true, - "chat_type": "user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "8201064df67b5762ff9f361ff1b98aae3747855c", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 14483472384, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1451.7455065359477, - "min_seq_length": 1428, - "max_seq_length": 1518, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1675.7455065359477, - "min_seq_length": 1652, - "max_seq_length": 1742, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 1, - "non_truncated": 718, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 1, - "mean_seq_length": 1744.9262865090404, - "min_seq_length": 1368, - "max_seq_length": 2545, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998609179415855 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1645.039188243527, - "min_seq_length": 1379, - "max_seq_length": 2643, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1691.9876923076922, - "min_seq_length": 1636, - "max_seq_length": 1812, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 3, + "non_truncated": 14147, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 3, + "has_chat_template": true, + "chat_type": "user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "8201064df67b5762ff9f361ff1b98aae3747855c", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 14483472384, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1462.3878571428572, - "min_seq_length": 1439, - "max_seq_length": 1713, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1390.764464692483, - "min_seq_length": 1124, - "max_seq_length": 1893, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1451.7455065359477, + "min_seq_length": 1428, + "max_seq_length": 1518, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1675.7455065359477, + "min_seq_length": 1652, + "max_seq_length": 1742, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 1, + "non_truncated": 718, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 1, + "mean_seq_length": 1744.9262865090404, + "min_seq_length": 1368, + "max_seq_length": 2545, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998609179415855 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1645.039188243527, + "min_seq_length": 1379, + "max_seq_length": 2643, 
+ "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1691.9876923076922, + "min_seq_length": 1636, + "max_seq_length": 1812, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1462.3878571428572, + "min_seq_length": 1439, + "max_seq_length": 1713, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1390.764464692483, + "min_seq_length": 1124, + "max_seq_length": 1893, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1963.3360752056403, + "min_seq_length": 1928, + "max_seq_length": 2002, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1709.2492537313433, + "min_seq_length": 1688, + "max_seq_length": 1804, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1963.3360752056403, - "min_seq_length": 1928, - "max_seq_length": 2002, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=UCLA-AGI/Mistral7B-PairRM-SPPO-Iter2,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1709.2492537313433, - "min_seq_length": 1688, - "max_seq_length": 1804, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=UCLA-AGI/Mistral7B-PairRM-SPPO-Iter2,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - 
"bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/UCLA-AGI/Mistral7B-PairRM-SPPO-Iter2/results_2024-08-09T04-26-37.476674.json b/UCLA-AGI/Mistral7B-PairRM-SPPO-Iter2/results_2024-08-09T04-26-37.476674.json index 7384b9b0901427a5b729313b776a97b614367f1e..89cf1ba38cd4ce975b529e7c011dd405c14c0f38 100644 --- a/UCLA-AGI/Mistral7B-PairRM-SPPO-Iter2/results_2024-08-09T04-26-37.476674.json +++ b/UCLA-AGI/Mistral7B-PairRM-SPPO-Iter2/results_2024-08-09T04-26-37.476674.json @@ -34,18 +34,18 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.5867125900858648, - "all_grouped_npm": 0.36706678932834724, + "all_grouped_average": 0.6547254552216082, + "all_grouped_npm": 0.48761398470969564, "all_grouped": { "enem_challenge": 0.5948215535339398, "bluex": 0.5062586926286509, "oab_exams": 0.379498861047836, "assin2_rte": 0.9056240104288098, "assin2_sts": 0.7150956617943389, - "faquad_nli": 0.4366431726939579, + "faquad_nli": 0.6549647590409369, "hatebr_offensive": 0.7873748146886901, - "portuguese_hate_speech": 0.452572111335143, - "tweetsentbr": 0.5025244326214171 + "portuguese_hate_speech": 0.6788581670027144, + "tweetsentbr": 0.6700325768285561 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.5948215535339398, @@ -53,10 +53,10 @@ "harness|oab_exams|oab_exams|None|3": 0.379498861047836, "harness|assin2_rte|assin2_rte|None|15": 0.9056240104288098, "harness|assin2_sts|assin2_sts|None|15": 0.7150956617943389, - "harness|faquad_nli|faquad_nli|None|15": 0.4366431726939579, + "harness|faquad_nli|faquad_nli|None|15": 0.6549647590409369, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7873748146886901, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.452572111335143, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5025244326214171 + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6788581670027144, + "harness|tweetsentbr|tweetsentbr|None|25": 0.6700325768285561 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.5948215535339398, @@ -135,9 +135,9 @@ "main_score": 0.7150956617943389 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.4366431726939579, + "f1_macro,all": 0.6549647590409369, "acc,all": 0.6907692307692308, - "main_score": 0.4366431726939579 + "main_score": 0.6549647590409369 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { "f1_macro,all": 0.7873748146886901, @@ -145,14 +145,14 @@ "main_score": 0.7873748146886901 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.452572111335143, + "f1_macro,all": 0.6788581670027144, "acc,all": 0.7426556991774383, - "main_score": 0.452572111335143 + "main_score": 0.6788581670027144 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5025244326214171, + "f1_macro,all": 0.6700325768285561, "acc,all": 0.6985074626865672, - "main_score": 0.5025244326214171 + "main_score": 0.6700325768285561 } }, "config_tasks": { diff --git a/UCLA-AGI/Mistral7B-PairRM-SPPO-Iter3/raw_2024-08-09T05-34-49.243967/results.json b/UCLA-AGI/Mistral7B-PairRM-SPPO-Iter3/raw_2024-08-09T05-34-49.243967/results.json index 0b2206942a518e76781c31d3bae0c9d52f2976d9..b637116b2e477f4bccce9d1d40123492bab0530b 100644 --- a/UCLA-AGI/Mistral7B-PairRM-SPPO-Iter3/raw_2024-08-09T05-34-49.243967/results.json +++ b/UCLA-AGI/Mistral7B-PairRM-SPPO-Iter3/raw_2024-08-09T05-34-49.243967/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - 
"f1_macro,all": 0.6036966175773639, - "acc,all": 0.9048202614379085, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7078631967714201, - "mse,all": 1.2318439950980393, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5104311543810849, - "acc,exam_id__UNICAMP_2023": 0.4186046511627907, - "acc,exam_id__UNICAMP_2022": 0.5384615384615384, - "acc,exam_id__USP_2018": 0.4444444444444444, - "acc,exam_id__UNICAMP_2020": 0.4909090909090909, - "acc,exam_id__USP_2020": 0.39285714285714285, - "acc,exam_id__UNICAMP_2024": 0.4888888888888889, - "acc,exam_id__UNICAMP_2018": 0.42592592592592593, - "acc,exam_id__USP_2019": 0.425, - "acc,exam_id__USP_2023": 0.6590909090909091, - "acc,exam_id__UNICAMP_2021_1": 0.6086956521739131, - "acc,exam_id__USP_2021": 0.5576923076923077, - "acc,exam_id__USP_2024": 0.6829268292682927, - "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, - "acc,exam_id__USP_2022": 0.4489795918367347, - "acc,exam_id__UNICAMP_2019": 0.56, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.5864240727781665, - "acc,exam_id__2013": 0.6111111111111112, - "acc,exam_id__2010": 0.5726495726495726, - "acc,exam_id__2015": 0.5882352941176471, - "acc,exam_id__2016": 0.5867768595041323, - "acc,exam_id__2017": 0.5258620689655172, - "acc,exam_id__2012": 0.5603448275862069, - "acc,exam_id__2023": 0.6074074074074074, - "acc,exam_id__2022": 0.5037593984962406, - "acc,exam_id__2016_2": 0.5853658536585366, - "acc,exam_id__2014": 0.6330275229357798, - "acc,exam_id__2009": 0.6086956521739131, - "acc,exam_id__2011": 0.6666666666666666 - }, - "faquad_nli": { - "f1_macro,all": 0.4358982679885266, - "acc,all": 0.6892307692307692, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7827403231330002, - "acc,all": 0.7892857142857143 - }, - "oab_exams": { - "acc,all": 0.379498861047836, - "acc,exam_id__2011-04": 0.3125, - "acc,exam_id__2015-18": 0.4125, - "acc,exam_id__2015-17": 0.5769230769230769, - "acc,exam_id__2016-21": 0.3625, - "acc,exam_id__2014-13": 0.25, - "acc,exam_id__2017-22": 0.4875, - "acc,exam_id__2010-02": 0.37, - "acc,exam_id__2012-08": 0.4, - "acc,exam_id__2011-05": 0.3375, - "acc,exam_id__2014-14": 0.4625, - "acc,exam_id__2017-24": 0.3375, - "acc,exam_id__2012-07": 0.3625, - "acc,exam_id__2012-09": 0.4155844155844156, - "acc,exam_id__2010-01": 0.3058823529411765, - "acc,exam_id__2012-06": 0.375, - "acc,exam_id__2013-10": 0.375, - "acc,exam_id__2016-19": 0.4230769230769231, - "acc,exam_id__2017-23": 0.35, - "acc,exam_id__2014-15": 0.3974358974358974, - "acc,exam_id__2016-20a": 0.2625, - "acc,exam_id__2015-16": 0.3125, - "acc,exam_id__2013-12": 0.425, - "acc,exam_id__2011-03": 0.3939393939393939, - "acc,exam_id__2013-11": 0.4, - "acc,exam_id__2018-25": 0.3875, - "acc,exam_id__2016-20": 0.325, - "acc,exam_id__2012-06a": 0.4375, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.44963984969174103, - "acc,all": 0.7332549941245593 - }, - "tweetsentbr": { - "f1_macro,all": 0.5032751303067, - "acc,all": 0.6955223880597015, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? 
Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9055449263660459, + "acc,all": 0.9048202614379085, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7078631967714201, + "mse,all": 1.2318439950980393, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5104311543810849, + "acc,exam_id__UNICAMP_2023": 0.4186046511627907, + "acc,exam_id__UNICAMP_2022": 0.5384615384615384, + "acc,exam_id__USP_2018": 0.4444444444444444, + "acc,exam_id__UNICAMP_2020": 0.4909090909090909, + "acc,exam_id__USP_2020": 0.39285714285714285, + "acc,exam_id__UNICAMP_2024": 0.4888888888888889, + "acc,exam_id__UNICAMP_2018": 0.42592592592592593, + "acc,exam_id__USP_2019": 0.425, + "acc,exam_id__USP_2023": 0.6590909090909091, + "acc,exam_id__UNICAMP_2021_1": 0.6086956521739131, + "acc,exam_id__USP_2021": 0.5576923076923077, + "acc,exam_id__USP_2024": 0.6829268292682927, + "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, + "acc,exam_id__USP_2022": 0.4489795918367347, + "acc,exam_id__UNICAMP_2019": 0.56, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.5864240727781665, + "acc,exam_id__2013": 0.6111111111111112, + "acc,exam_id__2010": 0.5726495726495726, + "acc,exam_id__2015": 0.5882352941176471, + "acc,exam_id__2016": 0.5867768595041323, + "acc,exam_id__2017": 0.5258620689655172, + "acc,exam_id__2012": 0.5603448275862069, + "acc,exam_id__2023": 0.6074074074074074, + "acc,exam_id__2022": 0.5037593984962406, + "acc,exam_id__2016_2": 0.5853658536585366, + "acc,exam_id__2014": 0.6330275229357798, + "acc,exam_id__2009": 0.6086956521739131, + "acc,exam_id__2011": 0.6666666666666666 + }, + "faquad_nli": { + "f1_macro,all": 0.6538474019827898, + "acc,all": 0.6892307692307692, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7827403231330002, + "acc,all": 0.7892857142857143 + }, + "oab_exams": { + "acc,all": 0.379498861047836, + "acc,exam_id__2011-04": 0.3125, + "acc,exam_id__2015-18": 0.4125, + "acc,exam_id__2015-17": 0.5769230769230769, + "acc,exam_id__2016-21": 0.3625, + "acc,exam_id__2014-13": 0.25, + "acc,exam_id__2017-22": 0.4875, + "acc,exam_id__2010-02": 0.37, + "acc,exam_id__2012-08": 0.4, + "acc,exam_id__2011-05": 0.3375, + "acc,exam_id__2014-14": 0.4625, + "acc,exam_id__2017-24": 0.3375, + "acc,exam_id__2012-07": 0.3625, + "acc,exam_id__2012-09": 0.4155844155844156, + "acc,exam_id__2010-01": 0.3058823529411765, + "acc,exam_id__2012-06": 0.375, + "acc,exam_id__2013-10": 0.375, + "acc,exam_id__2016-19": 0.4230769230769231, + 
"acc,exam_id__2017-23": 0.35, + "acc,exam_id__2014-15": 0.3974358974358974, + "acc,exam_id__2016-20a": 0.2625, + "acc,exam_id__2015-16": 0.3125, + "acc,exam_id__2013-12": 0.425, + "acc,exam_id__2011-03": 0.3939393939393939, + "acc,exam_id__2013-11": 0.4, + "acc,exam_id__2018-25": 0.3875, + "acc,exam_id__2016-20": 0.325, + "acc,exam_id__2012-06a": 0.4375, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6744597745376115, + "acc,all": 0.7332549941245593 + }, + "tweetsentbr": { + "f1_macro,all": 0.6710335070756, + "acc,all": 0.6955223880597015, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 3, - "non_truncated": 14147, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 3, - "has_chat_template": true, - "chat_type": "user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "72cd8e5435ae679249ddad7ac4cdb64c5b4590c3", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 14483472384, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1451.7455065359477, - "min_seq_length": 1428, - "max_seq_length": 1518, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1675.7455065359477, - "min_seq_length": 1652, - "max_seq_length": 1742, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 1, - "non_truncated": 718, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 1, - "mean_seq_length": 1744.9262865090404, - "min_seq_length": 1368, - "max_seq_length": 2545, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998609179415855 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1645.039188243527, - "min_seq_length": 1379, - "max_seq_length": 2643, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1691.9876923076922, - "min_seq_length": 1636, - "max_seq_length": 1812, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 3, + "non_truncated": 14147, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 3, + "has_chat_template": true, + "chat_type": "user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "72cd8e5435ae679249ddad7ac4cdb64c5b4590c3", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 14483472384, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1462.3878571428572, - "min_seq_length": 1439, - "max_seq_length": 1713, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1390.764464692483, - "min_seq_length": 1124, - "max_seq_length": 1893, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1451.7455065359477, + "min_seq_length": 1428, + "max_seq_length": 1518, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1675.7455065359477, + "min_seq_length": 1652, + "max_seq_length": 1742, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 1, + "non_truncated": 718, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 1, + "mean_seq_length": 1744.9262865090404, + "min_seq_length": 1368, + "max_seq_length": 2545, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998609179415855 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1645.039188243527, + "min_seq_length": 1379, + "max_seq_length": 2643, 
+ "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1691.9876923076922, + "min_seq_length": 1636, + "max_seq_length": 1812, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1462.3878571428572, + "min_seq_length": 1439, + "max_seq_length": 1713, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1390.764464692483, + "min_seq_length": 1124, + "max_seq_length": 1893, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1963.3360752056403, + "min_seq_length": 1928, + "max_seq_length": 2002, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1709.2492537313433, + "min_seq_length": 1688, + "max_seq_length": 1804, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1963.3360752056403, - "min_seq_length": 1928, - "max_seq_length": 2002, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=UCLA-AGI/Mistral7B-PairRM-SPPO-Iter3,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1709.2492537313433, - "min_seq_length": 1688, - "max_seq_length": 1804, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=UCLA-AGI/Mistral7B-PairRM-SPPO-Iter3,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - 
"bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/UCLA-AGI/Mistral7B-PairRM-SPPO-Iter3/results_2024-08-09T05-34-49.243967.json b/UCLA-AGI/Mistral7B-PairRM-SPPO-Iter3/results_2024-08-09T05-34-49.243967.json index a46df53d058fb2bb80ffb0d44f8df2d3eaf695d9..d710bdbadfd88282d0e7873545f84bd9b9b2e7aa 100644 --- a/UCLA-AGI/Mistral7B-PairRM-SPPO-Iter3/results_2024-08-09T05-34-49.243967.json +++ b/UCLA-AGI/Mistral7B-PairRM-SPPO-Iter3/results_2024-08-09T05-34-49.243967.json @@ -34,29 +34,29 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.5510519415195376, - "all_grouped_npm": 0.29691683321884477, + "all_grouped_average": 0.652427024230395, + "all_grouped_npm": 0.48419405768083185, "all_grouped": { "enem_challenge": 0.5864240727781665, "bluex": 0.5104311543810849, "oab_exams": 0.379498861047836, - "assin2_rte": 0.6036966175773639, + "assin2_rte": 0.9055449263660459, "assin2_sts": 0.7078631967714201, - "faquad_nli": 0.4358982679885266, + "faquad_nli": 0.6538474019827898, "hatebr_offensive": 0.7827403231330002, - "portuguese_hate_speech": 0.44963984969174103, - "tweetsentbr": 0.5032751303067 + "portuguese_hate_speech": 0.6744597745376115, + "tweetsentbr": 0.6710335070756 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.5864240727781665, "harness|bluex|bluex|None|3": 0.5104311543810849, "harness|oab_exams|oab_exams|None|3": 0.379498861047836, - "harness|assin2_rte|assin2_rte|None|15": 0.6036966175773639, + "harness|assin2_rte|assin2_rte|None|15": 0.9055449263660459, "harness|assin2_sts|assin2_sts|None|15": 0.7078631967714201, - "harness|faquad_nli|faquad_nli|None|15": 0.4358982679885266, + "harness|faquad_nli|faquad_nli|None|15": 0.6538474019827898, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7827403231330002, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.44963984969174103, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5032751303067 + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6744597745376115, + "harness|tweetsentbr|tweetsentbr|None|25": 0.6710335070756 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.5864240727781665, @@ -125,9 +125,9 @@ "main_score": 0.379498861047836 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.6036966175773639, + "f1_macro,all": 0.9055449263660459, "acc,all": 0.9048202614379085, - "main_score": 0.6036966175773639 + "main_score": 0.9055449263660459 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.7078631967714201, @@ -135,9 +135,9 @@ "main_score": 0.7078631967714201 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.4358982679885266, + "f1_macro,all": 0.6538474019827898, "acc,all": 0.6892307692307692, - "main_score": 0.4358982679885266 + "main_score": 0.6538474019827898 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { "f1_macro,all": 0.7827403231330002, @@ -145,14 +145,14 @@ "main_score": 0.7827403231330002 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.44963984969174103, + "f1_macro,all": 0.6744597745376115, "acc,all": 0.7332549941245593, - "main_score": 0.44963984969174103 + "main_score": 0.6744597745376115 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5032751303067, + "f1_macro,all": 0.6710335070756, "acc,all": 0.6955223880597015, - "main_score": 0.5032751303067 + "main_score": 0.6710335070756 } }, "config_tasks": { diff --git 
a/VAGOsolutions/Llama-3-SauerkrautLM-8b-Instruct/raw_2024-05-19T22-55-30.334730/results.json b/VAGOsolutions/Llama-3-SauerkrautLM-8b-Instruct/raw_2024-05-19T22-55-30.334730/results.json index 35cf3222d68e5597bfbc4ac8aaefe47b4fd7dd46..73acad387851bafd145f33b94270fbf74c661cef 100644 --- a/VAGOsolutions/Llama-3-SauerkrautLM-8b-Instruct/raw_2024-05-19T22-55-30.334730/results.json +++ b/VAGOsolutions/Llama-3-SauerkrautLM-8b-Instruct/raw_2024-05-19T22-55-30.334730/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9174464106844742, - "acc,all": 0.9174836601307189, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7270928056226034, - "mse,all": 0.7293872549019608, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5855354659248957, - "acc,exam_id__UNICAMP_2022": 0.6923076923076923, - "acc,exam_id__USP_2024": 0.6829268292682927, - "acc,exam_id__USP_2022": 0.5306122448979592, - "acc,exam_id__USP_2020": 0.5714285714285714, - "acc,exam_id__UNICAMP_2019": 0.64, - "acc,exam_id__UNICAMP_2024": 0.6444444444444445, - "acc,exam_id__USP_2021": 0.5769230769230769, - "acc,exam_id__USP_2023": 0.7045454545454546, - "acc,exam_id__USP_2019": 0.6, - "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, - "acc,exam_id__UNICAMP_2020": 0.5454545454545454, - "acc,exam_id__UNICAMP_2018": 0.4444444444444444, - "acc,exam_id__UNICAMP_2023": 0.627906976744186, - "acc,exam_id__UNICAMP_2021_1": 0.6086956521739131, - "acc,exam_id__USP_2018": 0.4444444444444444, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.7102869139258222, - "acc,exam_id__2023": 0.762962962962963, - "acc,exam_id__2009": 0.7478260869565218, - "acc,exam_id__2010": 0.7094017094017094, - "acc,exam_id__2015": 0.7226890756302521, - "acc,exam_id__2017": 0.6896551724137931, - "acc,exam_id__2016_2": 0.6585365853658537, - "acc,exam_id__2022": 0.6616541353383458, - "acc,exam_id__2011": 0.7350427350427351, - "acc,exam_id__2012": 0.7241379310344828, - "acc,exam_id__2014": 0.7339449541284404, - "acc,exam_id__2013": 0.6851851851851852, - "acc,exam_id__2016": 0.6942148760330579 - }, - "faquad_nli": { - "f1_macro,all": 0.7545670310701956, - "acc,all": 0.8061538461538461, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8663024455213106, - "acc,all": 0.8664285714285714 - }, - "oab_exams": { - "acc,all": 0.511617312072893, - "acc,exam_id__2017-23": 0.5, - "acc,exam_id__2017-24": 0.4875, - "acc,exam_id__2011-04": 0.5125, - "acc,exam_id__2014-14": 0.6375, - "acc,exam_id__2012-09": 0.5584415584415584, - "acc,exam_id__2016-19": 0.5256410256410257, - "acc,exam_id__2012-08": 0.5125, - "acc,exam_id__2012-06": 0.525, - "acc,exam_id__2018-25": 0.5375, - "acc,exam_id__2013-11": 0.5125, - "acc,exam_id__2013-12": 0.5625, - "acc,exam_id__2014-13": 0.425, - "acc,exam_id__2015-17": 0.6410256410256411, - "acc,exam_id__2017-22": 0.575, - "acc,exam_id__2011-03": 0.494949494949495, - "acc,exam_id__2015-16": 0.4625, - "acc,exam_id__2016-20a": 0.4625, - "acc,exam_id__2011-05": 0.45, - "acc,exam_id__2013-10": 0.45, - "acc,exam_id__2015-18": 0.525, - "acc,exam_id__2012-06a": 0.5625, - "acc,exam_id__2010-01": 0.3764705882352941, - "acc,exam_id__2016-20": 0.55, - "acc,exam_id__2014-15": 0.5641025641025641, - "acc,exam_id__2010-02": 0.55, - "acc,exam_id__2016-21": 0.3875, - "acc,exam_id__2012-07": 0.475, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6111969782184543, - 
"acc,all": 0.6133960047003525 - }, - "tweetsentbr": { - "f1_macro,all": 0.5061764633858858, - "acc,all": 0.7248756218905472, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9174464106844742, + "acc,all": 0.9174836601307189, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7270928056226034, + "mse,all": 0.7293872549019608, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5855354659248957, + "acc,exam_id__UNICAMP_2022": 0.6923076923076923, + "acc,exam_id__USP_2024": 0.6829268292682927, + "acc,exam_id__USP_2022": 0.5306122448979592, + "acc,exam_id__USP_2020": 0.5714285714285714, + "acc,exam_id__UNICAMP_2019": 0.64, + "acc,exam_id__UNICAMP_2024": 0.6444444444444445, + "acc,exam_id__USP_2021": 0.5769230769230769, + "acc,exam_id__USP_2023": 0.7045454545454546, + "acc,exam_id__USP_2019": 0.6, + "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, + "acc,exam_id__UNICAMP_2020": 0.5454545454545454, + "acc,exam_id__UNICAMP_2018": 0.4444444444444444, + "acc,exam_id__UNICAMP_2023": 0.627906976744186, + "acc,exam_id__UNICAMP_2021_1": 0.6086956521739131, + "acc,exam_id__USP_2018": 0.4444444444444444, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.7102869139258222, + "acc,exam_id__2023": 0.762962962962963, + "acc,exam_id__2009": 0.7478260869565218, + "acc,exam_id__2010": 0.7094017094017094, + "acc,exam_id__2015": 0.7226890756302521, + "acc,exam_id__2017": 0.6896551724137931, + "acc,exam_id__2016_2": 0.6585365853658537, + "acc,exam_id__2022": 0.6616541353383458, + "acc,exam_id__2011": 0.7350427350427351, + "acc,exam_id__2012": 0.7241379310344828, + "acc,exam_id__2014": 0.7339449541284404, + "acc,exam_id__2013": 0.6851851851851852, + "acc,exam_id__2016": 0.6942148760330579 + }, + "faquad_nli": { + "f1_macro,all": 0.7545670310701956, + "acc,all": 0.8061538461538461, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8663024455213106, + "acc,all": 0.8664285714285714 + }, + "oab_exams": { + "acc,all": 0.511617312072893, + "acc,exam_id__2017-23": 0.5, + "acc,exam_id__2017-24": 0.4875, + "acc,exam_id__2011-04": 0.5125, + "acc,exam_id__2014-14": 0.6375, + "acc,exam_id__2012-09": 
0.5584415584415584, + "acc,exam_id__2016-19": 0.5256410256410257, + "acc,exam_id__2012-08": 0.5125, + "acc,exam_id__2012-06": 0.525, + "acc,exam_id__2018-25": 0.5375, + "acc,exam_id__2013-11": 0.5125, + "acc,exam_id__2013-12": 0.5625, + "acc,exam_id__2014-13": 0.425, + "acc,exam_id__2015-17": 0.6410256410256411, + "acc,exam_id__2017-22": 0.575, + "acc,exam_id__2011-03": 0.494949494949495, + "acc,exam_id__2015-16": 0.4625, + "acc,exam_id__2016-20a": 0.4625, + "acc,exam_id__2011-05": 0.45, + "acc,exam_id__2013-10": 0.45, + "acc,exam_id__2015-18": 0.525, + "acc,exam_id__2012-06a": 0.5625, + "acc,exam_id__2010-01": 0.3764705882352941, + "acc,exam_id__2016-20": 0.55, + "acc,exam_id__2014-15": 0.5641025641025641, + "acc,exam_id__2010-02": 0.55, + "acc,exam_id__2016-21": 0.3875, + "acc,exam_id__2012-07": 0.475, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6111969782184543, + "acc,all": 0.6133960047003525 + }, + "tweetsentbr": { + "f1_macro,all": 0.6749019511811811, + "acc,all": 0.7248756218905472, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "37127c44d7c0fb56cef817270c4b1a6802d8793a", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 16060530688, - "model_num_parameters": 8030261248, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 4, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1318.5322712418301, - "min_seq_length": 1299, - "max_seq_length": 1382, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1509.5322712418301, - "min_seq_length": 1490, - "max_seq_length": 1573, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1484.769123783032, - "min_seq_length": 1165, - "max_seq_length": 2134, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1412.3547935619315, - "min_seq_length": 1187, - "max_seq_length": 2340, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1447.8215384615385, - "min_seq_length": 1402, - "max_seq_length": 1544, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "37127c44d7c0fb56cef817270c4b1a6802d8793a", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 16060530688, + "model_num_parameters": 8030261248, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 4, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1279.3878571428572, - "min_seq_length": 1259, - "max_seq_length": 1498, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1220.3772209567198, - "min_seq_length": 988, - "max_seq_length": 1654, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1318.5322712418301, + "min_seq_length": 1299, + "max_seq_length": 1382, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1509.5322712418301, + "min_seq_length": 1490, + "max_seq_length": 1573, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1484.769123783032, + "min_seq_length": 1165, + "max_seq_length": 2134, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1412.3547935619315, + "min_seq_length": 1187, + "max_seq_length": 2340, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1447.8215384615385, + "min_seq_length": 1402, + "max_seq_length": 1544, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1279.3878571428572, + "min_seq_length": 1259, + "max_seq_length": 1498, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1220.3772209567198, + "min_seq_length": 988, + "max_seq_length": 1654, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1676.4195064629848, + "min_seq_length": 1646, + "max_seq_length": 1708, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1537.1537313432837, + "min_seq_length": 1520, + "max_seq_length": 1585, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1676.4195064629848, - "min_seq_length": 1646, - "max_seq_length": 1708, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=VAGOsolutions/Llama-3-SauerkrautLM-8b-Instruct,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1537.1537313432837, - "min_seq_length": 1520, - "max_seq_length": 1585, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=VAGOsolutions/Llama-3-SauerkrautLM-8b-Instruct,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - 
"gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/VAGOsolutions/Llama-3-SauerkrautLM-8b-Instruct/results_2024-05-19T22-55-30.334730.json b/VAGOsolutions/Llama-3-SauerkrautLM-8b-Instruct/results_2024-05-19T22-55-30.334730.json index a3f9be4e8056959a370fc275d2507ce545837e54..d7656eb9a77e2911996427088f62f3815f306372 100644 --- a/VAGOsolutions/Llama-3-SauerkrautLM-8b-Instruct/results_2024-05-19T22-55-30.334730.json +++ b/VAGOsolutions/Llama-3-SauerkrautLM-8b-Instruct/results_2024-05-19T22-55-30.334730.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.687802425158504, - "all_grouped_npm": 0.5349106999142944, + "all_grouped_average": 0.7065497015802034, + "all_grouped_npm": 0.5628084326846806, "all_grouped": { "enem_challenge": 0.7102869139258222, "bluex": 0.5855354659248957, @@ -45,7 +45,7 @@ "faquad_nli": 0.7545670310701956, "hatebr_offensive": 0.8663024455213106, "portuguese_hate_speech": 0.6111969782184543, - "tweetsentbr": 0.5061764633858858 + "tweetsentbr": 0.6749019511811811 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.7102869139258222, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7545670310701956, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8663024455213106, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6111969782184543, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5061764633858858 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6749019511811811 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.7102869139258222, @@ -150,9 +150,9 @@ "main_score": 0.6111969782184543 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5061764633858858, + "f1_macro,all": 0.6749019511811811, "acc,all": 0.7248756218905472, - "main_score": 0.5061764633858858 + "main_score": 0.6749019511811811 } }, "config_tasks": { diff --git a/VAGOsolutions/SauerkrautLM-Nemo-12b-Instruct/raw_2024-08-07T05-12-30.136519/results.json b/VAGOsolutions/SauerkrautLM-Nemo-12b-Instruct/raw_2024-08-07T05-12-30.136519/results.json index 5ce16cf6b210a2d4985bdb7f251cdee77879a24b..5d20a84bb5061089ecb1119b287101eb9b02d0b8 100644 --- a/VAGOsolutions/SauerkrautLM-Nemo-12b-Instruct/raw_2024-08-07T05-12-30.136519/results.json +++ b/VAGOsolutions/SauerkrautLM-Nemo-12b-Instruct/raw_2024-08-07T05-12-30.136519/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9264580217902157, - "acc,all": 0.9264705882352942, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.759889315042445, - "mse,all": 0.5542159415849673, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5841446453407511, - "acc,exam_id__USP_2021": 0.6153846153846154, - "acc,exam_id__USP_2022": 0.6122448979591837, - "acc,exam_id__USP_2024": 0.8292682926829268, - "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, - "acc,exam_id__USP_2018": 0.5925925925925926, - "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, - "acc,exam_id__UNICAMP_2022": 0.5641025641025641, - "acc,exam_id__USP_2019": 0.55, - "acc,exam_id__UNICAMP_2023": 0.6046511627906976, - "acc,exam_id__UNICAMP_2019": 0.52, - "acc,exam_id__UNICAMP_2024": 0.5333333333333333, - "acc,exam_id__USP_2023": 0.75, - "acc,exam_id__UNICAMP_2018": 0.48148148148148145, - "acc,exam_id__UNICAMP_2020": 0.5818181818181818, - "acc,exam_id__USP_2020": 0.5892857142857143, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.7004898530440867, - "acc,exam_id__2016": 
0.6942148760330579, - "acc,exam_id__2015": 0.7226890756302521, - "acc,exam_id__2011": 0.7521367521367521, - "acc,exam_id__2009": 0.7217391304347827, - "acc,exam_id__2022": 0.6390977443609023, - "acc,exam_id__2023": 0.6888888888888889, - "acc,exam_id__2010": 0.7777777777777778, - "acc,exam_id__2014": 0.6880733944954128, - "acc,exam_id__2016_2": 0.6504065040650406, - "acc,exam_id__2012": 0.7155172413793104, - "acc,exam_id__2013": 0.6759259259259259, - "acc,exam_id__2017": 0.6896551724137931 - }, - "faquad_nli": { - "f1_macro,all": 0.8317563486813724, - "acc,all": 0.8753846153846154, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8197534327445837, - "acc,all": 0.8242857142857143 - }, - "oab_exams": { - "acc,all": 0.5252847380410023, - "acc,exam_id__2010-02": 0.58, - "acc,exam_id__2014-13": 0.4875, - "acc,exam_id__2015-16": 0.5, - "acc,exam_id__2010-01": 0.49411764705882355, - "acc,exam_id__2016-20a": 0.4875, - "acc,exam_id__2017-24": 0.5, - "acc,exam_id__2012-07": 0.575, - "acc,exam_id__2015-18": 0.55, - "acc,exam_id__2014-15": 0.6282051282051282, - "acc,exam_id__2011-03": 0.494949494949495, - "acc,exam_id__2018-25": 0.45, - "acc,exam_id__2016-19": 0.5384615384615384, - "acc,exam_id__2013-11": 0.45, - "acc,exam_id__2012-06a": 0.55, - "acc,exam_id__2012-06": 0.525, - "acc,exam_id__2017-23": 0.5, - "acc,exam_id__2015-17": 0.6025641025641025, - "acc,exam_id__2016-21": 0.4875, - "acc,exam_id__2013-10": 0.6125, - "acc,exam_id__2012-08": 0.525, - "acc,exam_id__2017-22": 0.625, - "acc,exam_id__2014-14": 0.6, - "acc,exam_id__2012-09": 0.45454545454545453, - "acc,exam_id__2013-12": 0.45, - "acc,exam_id__2016-20": 0.5375, - "acc,exam_id__2011-05": 0.5375, - "acc,exam_id__2011-04": 0.4375, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7566728675761334, - "acc,all": 0.8049353701527615 - }, - "tweetsentbr": { - "f1_macro,all": 0.5423322114730253, - "acc,all": 0.7517412935323383, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9264580217902157, + "acc,all": 0.9264705882352942, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.759889315042445, + "mse,all": 0.5542159415849673, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5841446453407511, + "acc,exam_id__USP_2021": 0.6153846153846154, + "acc,exam_id__USP_2022": 0.6122448979591837, + "acc,exam_id__USP_2024": 0.8292682926829268, + "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, + "acc,exam_id__USP_2018": 0.5925925925925926, + "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, + "acc,exam_id__UNICAMP_2022": 0.5641025641025641, + "acc,exam_id__USP_2019": 0.55, + "acc,exam_id__UNICAMP_2023": 0.6046511627906976, + "acc,exam_id__UNICAMP_2019": 0.52, + "acc,exam_id__UNICAMP_2024": 0.5333333333333333, + "acc,exam_id__USP_2023": 0.75, + "acc,exam_id__UNICAMP_2018": 0.48148148148148145, + "acc,exam_id__UNICAMP_2020": 0.5818181818181818, + "acc,exam_id__USP_2020": 0.5892857142857143, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.7004898530440867, + "acc,exam_id__2016": 0.6942148760330579, + "acc,exam_id__2015": 0.7226890756302521, + "acc,exam_id__2011": 0.7521367521367521, + "acc,exam_id__2009": 0.7217391304347827, + "acc,exam_id__2022": 0.6390977443609023, + "acc,exam_id__2023": 0.6888888888888889, + "acc,exam_id__2010": 0.7777777777777778, + "acc,exam_id__2014": 0.6880733944954128, + "acc,exam_id__2016_2": 0.6504065040650406, + "acc,exam_id__2012": 0.7155172413793104, + "acc,exam_id__2013": 0.6759259259259259, + "acc,exam_id__2017": 0.6896551724137931 + }, + "faquad_nli": { + "f1_macro,all": 0.8317563486813724, + "acc,all": 0.8753846153846154, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8197534327445837, + "acc,all": 0.8242857142857143 + }, + "oab_exams": { + "acc,all": 0.5252847380410023, + "acc,exam_id__2010-02": 0.58, + "acc,exam_id__2014-13": 0.4875, + "acc,exam_id__2015-16": 0.5, + "acc,exam_id__2010-01": 0.49411764705882355, + "acc,exam_id__2016-20a": 0.4875, + "acc,exam_id__2017-24": 0.5, + "acc,exam_id__2012-07": 0.575, + "acc,exam_id__2015-18": 0.55, + "acc,exam_id__2014-15": 0.6282051282051282, + "acc,exam_id__2011-03": 0.494949494949495, + "acc,exam_id__2018-25": 0.45, + "acc,exam_id__2016-19": 0.5384615384615384, + "acc,exam_id__2013-11": 0.45, + "acc,exam_id__2012-06a": 0.55, + "acc,exam_id__2012-06": 0.525, + "acc,exam_id__2017-23": 0.5, + "acc,exam_id__2015-17": 0.6025641025641025, + "acc,exam_id__2016-21": 0.4875, + "acc,exam_id__2013-10": 0.6125, + "acc,exam_id__2012-08": 0.525, + "acc,exam_id__2017-22": 0.625, + "acc,exam_id__2014-14": 0.6, 
+ "acc,exam_id__2012-09": 0.45454545454545453, + "acc,exam_id__2013-12": 0.45, + "acc,exam_id__2016-20": 0.5375, + "acc,exam_id__2011-05": 0.5375, + "acc,exam_id__2011-04": 0.4375, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7566728675761334, + "acc,all": 0.8049353701527615 + }, + "tweetsentbr": { + "f1_macro,all": 0.7231096152973672, + "acc,all": 0.7517412935323383, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "fcb056465084ab2c71503a0760f46e4be79c985c", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 24495575040, - "model_num_parameters": 12247782400, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1101.376633986928, - "min_seq_length": 1083, - "max_seq_length": 1159, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1310.376633986928, - "min_seq_length": 1292, - "max_seq_length": 1368, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1404.518776077886, - "min_seq_length": 1101, - "max_seq_length": 2054, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1276.4590622813157, - "min_seq_length": 1067, - "max_seq_length": 2341, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1174.0492307692307, - "min_seq_length": 1133, - "max_seq_length": 1254, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "fcb056465084ab2c71503a0760f46e4be79c985c", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 24495575040, + "model_num_parameters": 12247782400, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1080.5, - "min_seq_length": 1060, - "max_seq_length": 1288, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1030.0373576309794, - "min_seq_length": 818, - "max_seq_length": 1376, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1101.376633986928, + "min_seq_length": 1083, + "max_seq_length": 1159, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1310.376633986928, + "min_seq_length": 1292, + "max_seq_length": 1368, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1404.518776077886, + "min_seq_length": 1101, + "max_seq_length": 2054, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1276.4590622813157, + "min_seq_length": 1067, + "max_seq_length": 2341, + "max_ctx_length": 2528, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1174.0492307692307, + "min_seq_length": 1133, + "max_seq_length": 1254, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1080.5, + "min_seq_length": 1060, + "max_seq_length": 1288, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1030.0373576309794, + "min_seq_length": 818, + "max_seq_length": 1376, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1450.2761457109284, + "min_seq_length": 1421, + "max_seq_length": 1492, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1294.776616915423, + "min_seq_length": 1277, + "max_seq_length": 1367, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1450.2761457109284, - "min_seq_length": 1421, - "max_seq_length": 1492, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=VAGOsolutions/SauerkrautLM-Nemo-12b-Instruct,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1294.776616915423, - "min_seq_length": 1277, - "max_seq_length": 1367, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=VAGOsolutions/SauerkrautLM-Nemo-12b-Instruct,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": 
"5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/VAGOsolutions/SauerkrautLM-Nemo-12b-Instruct/results_2024-08-07T05-12-30.136519.json b/VAGOsolutions/SauerkrautLM-Nemo-12b-Instruct/results_2024-08-07T05-12-30.136519.json index ef63001150acaf567d9b02d5ea5445d5ebada75f..4de0605f6d8c133e7926a3527d5b939f8f61b350 100644 --- a/VAGOsolutions/SauerkrautLM-Nemo-12b-Instruct/results_2024-08-07T05-12-30.136519.json +++ b/VAGOsolutions/SauerkrautLM-Nemo-12b-Instruct/results_2024-08-07T05-12-30.136519.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.716309048192624, - "all_grouped_npm": 0.58344666521289, + "all_grouped_average": 0.7363954263953285, + "all_grouped_npm": 0.6133371089669148, "all_grouped": { "enem_challenge": 0.7004898530440867, "bluex": 0.5841446453407511, @@ -45,7 +45,7 @@ "faquad_nli": 0.8317563486813724, "hatebr_offensive": 0.8197534327445837, "portuguese_hate_speech": 0.7566728675761334, - "tweetsentbr": 0.5423322114730253 + "tweetsentbr": 0.7231096152973672 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.7004898530440867, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.8317563486813724, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8197534327445837, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7566728675761334, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5423322114730253 + "harness|tweetsentbr|tweetsentbr|None|25": 0.7231096152973672 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.7004898530440867, @@ -150,9 +150,9 @@ "main_score": 0.7566728675761334 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5423322114730253, + "f1_macro,all": 0.7231096152973672, "acc,all": 0.7517412935323383, - "main_score": 0.5423322114730253 + "main_score": 0.7231096152973672 } }, "config_tasks": { diff --git a/Walmart-the-bag/Misted-v2-7B/raw_2024-04-18T23-28-15.566455/results.json b/Walmart-the-bag/Misted-v2-7B/raw_2024-04-18T23-28-15.566455/results.json index f73b23f854b536a60df6efe77c623ee84406079e..eb6a641631dccf07c6109926f5aa48759825b217 100644 --- a/Walmart-the-bag/Misted-v2-7B/raw_2024-04-18T23-28-15.566455/results.json +++ b/Walmart-the-bag/Misted-v2-7B/raw_2024-04-18T23-28-15.566455/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9113400977866568, - "acc,all": 0.9113562091503268, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7517173052153636, - "mse,all": 0.5459004660947713, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5382475660639777, - "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, - "acc,exam_id__USP_2023": 0.6136363636363636, - "acc,exam_id__UNICAMP_2020": 0.4909090909090909, - "acc,exam_id__USP_2022": 0.46938775510204084, - "acc,exam_id__UNICAMP_2022": 0.5641025641025641, - "acc,exam_id__UNICAMP_2024": 0.4888888888888889, - "acc,exam_id__UNICAMP_2018": 0.42592592592592593, - "acc,exam_id__USP_2018": 0.5, - "acc,exam_id__UNICAMP_2019": 0.56, - "acc,exam_id__UNICAMP_2023": 0.5813953488372093, - "acc,exam_id__UNICAMP_2021_2": 0.6470588235294118, - "acc,exam_id__USP_2021": 0.5576923076923077, - "acc,exam_id__USP_2019": 0.475, - "acc,exam_id__USP_2020": 0.44642857142857145, - "acc,exam_id__USP_2024": 0.7560975609756098, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.629111266620014, - "acc,exam_id__2022": 0.5639097744360902, - "acc,exam_id__2017": 0.6293103448275862, - "acc,exam_id__2014": 0.6513761467889908, - 
"acc,exam_id__2011": 0.7350427350427351, - "acc,exam_id__2010": 0.5811965811965812, - "acc,exam_id__2016": 0.6115702479338843, - "acc,exam_id__2023": 0.6592592592592592, - "acc,exam_id__2016_2": 0.6016260162601627, - "acc,exam_id__2015": 0.6218487394957983, - "acc,exam_id__2009": 0.6347826086956522, - "acc,exam_id__2013": 0.6574074074074074, - "acc,exam_id__2012": 0.6120689655172413 - }, - "faquad_nli": { - "f1_macro,all": 0.6589071710738363, - "acc,all": 0.696923076923077, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8156351971251167, - "acc,all": 0.8192857142857143 - }, - "oab_exams": { - "acc,all": 0.4091116173120729, - "acc,exam_id__2012-06a": 0.3875, - "acc,exam_id__2012-09": 0.4675324675324675, - "acc,exam_id__2014-15": 0.41025641025641024, - "acc,exam_id__2015-17": 0.5256410256410257, - "acc,exam_id__2010-01": 0.32941176470588235, - "acc,exam_id__2011-05": 0.4125, - "acc,exam_id__2011-04": 0.375, - "acc,exam_id__2014-13": 0.325, - "acc,exam_id__2017-23": 0.3625, - "acc,exam_id__2017-22": 0.5625, - "acc,exam_id__2011-03": 0.40404040404040403, - "acc,exam_id__2016-19": 0.4358974358974359, - "acc,exam_id__2015-16": 0.3875, - "acc,exam_id__2010-02": 0.42, - "acc,exam_id__2012-07": 0.4125, - "acc,exam_id__2013-11": 0.3875, - "acc,exam_id__2016-21": 0.35, - "acc,exam_id__2013-12": 0.475, - "acc,exam_id__2018-25": 0.4375, - "acc,exam_id__2016-20a": 0.3, - "acc,exam_id__2012-08": 0.4, - "acc,exam_id__2013-10": 0.425, - "acc,exam_id__2017-24": 0.3625, - "acc,exam_id__2012-06": 0.425, - "acc,exam_id__2015-18": 0.4125, - "acc,exam_id__2016-20": 0.375, - "acc,exam_id__2014-14": 0.4875, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7150363835404727, - "acc,all": 0.7555816686251469 - }, - "tweetsentbr": { - "f1_macro,all": 0.5108106892668447, - "acc,all": 0.6975124378109453, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9113400977866568, + "acc,all": 0.9113562091503268, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7517173052153636, + "mse,all": 0.5459004660947713, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5382475660639777, + "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, + "acc,exam_id__USP_2023": 0.6136363636363636, + "acc,exam_id__UNICAMP_2020": 0.4909090909090909, + "acc,exam_id__USP_2022": 0.46938775510204084, + "acc,exam_id__UNICAMP_2022": 0.5641025641025641, + "acc,exam_id__UNICAMP_2024": 0.4888888888888889, + "acc,exam_id__UNICAMP_2018": 0.42592592592592593, + "acc,exam_id__USP_2018": 0.5, + "acc,exam_id__UNICAMP_2019": 0.56, + "acc,exam_id__UNICAMP_2023": 0.5813953488372093, + "acc,exam_id__UNICAMP_2021_2": 0.6470588235294118, + "acc,exam_id__USP_2021": 0.5576923076923077, + "acc,exam_id__USP_2019": 0.475, + "acc,exam_id__USP_2020": 0.44642857142857145, + "acc,exam_id__USP_2024": 0.7560975609756098, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.629111266620014, + "acc,exam_id__2022": 0.5639097744360902, + "acc,exam_id__2017": 0.6293103448275862, + "acc,exam_id__2014": 0.6513761467889908, + "acc,exam_id__2011": 0.7350427350427351, + "acc,exam_id__2010": 0.5811965811965812, + "acc,exam_id__2016": 0.6115702479338843, + "acc,exam_id__2023": 0.6592592592592592, + "acc,exam_id__2016_2": 0.6016260162601627, + "acc,exam_id__2015": 0.6218487394957983, + "acc,exam_id__2009": 0.6347826086956522, + "acc,exam_id__2013": 0.6574074074074074, + "acc,exam_id__2012": 0.6120689655172413 + }, + "faquad_nli": { + "f1_macro,all": 0.6589071710738363, + "acc,all": 0.696923076923077, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8156351971251167, + "acc,all": 0.8192857142857143 + }, + "oab_exams": { + "acc,all": 0.4091116173120729, + "acc,exam_id__2012-06a": 0.3875, + "acc,exam_id__2012-09": 0.4675324675324675, + "acc,exam_id__2014-15": 0.41025641025641024, + "acc,exam_id__2015-17": 0.5256410256410257, + "acc,exam_id__2010-01": 0.32941176470588235, + "acc,exam_id__2011-05": 0.4125, + "acc,exam_id__2011-04": 0.375, + "acc,exam_id__2014-13": 0.325, + "acc,exam_id__2017-23": 0.3625, + "acc,exam_id__2017-22": 0.5625, + "acc,exam_id__2011-03": 0.40404040404040403, + "acc,exam_id__2016-19": 0.4358974358974359, + "acc,exam_id__2015-16": 0.3875, + "acc,exam_id__2010-02": 0.42, + "acc,exam_id__2012-07": 0.4125, + "acc,exam_id__2013-11": 0.3875, + "acc,exam_id__2016-21": 0.35, + "acc,exam_id__2013-12": 0.475, + "acc,exam_id__2018-25": 0.4375, + "acc,exam_id__2016-20a": 0.3, + "acc,exam_id__2012-08": 0.4, + 
"acc,exam_id__2013-10": 0.425, + "acc,exam_id__2017-24": 0.3625, + "acc,exam_id__2012-06": 0.425, + "acc,exam_id__2015-18": 0.4125, + "acc,exam_id__2016-20": 0.375, + "acc,exam_id__2014-14": 0.4875, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7150363835404727, + "acc,all": 0.7555816686251469 + }, + "tweetsentbr": { + "f1_macro,all": 0.6810809190224597, + "acc,all": 0.6975124378109453, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 3, - "non_truncated": 14147, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 3, - "has_chat_template": true, - "chat_type": "user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "b59fe4e4136b5cf01c1a49a0a4f49b2faaf7dfcc", - "model_dtype": "torch.float16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1451.7455065359477, - "min_seq_length": 1428, - "max_seq_length": 1518, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1675.7455065359477, - "min_seq_length": 1652, - "max_seq_length": 1742, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 1, - "non_truncated": 718, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 1, - "mean_seq_length": 1744.9262865090404, - "min_seq_length": 1368, - "max_seq_length": 2545, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998609179415855 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1645.039188243527, - "min_seq_length": 1379, - "max_seq_length": 2643, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1691.9876923076922, - "min_seq_length": 1636, - "max_seq_length": 1812, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 3, + "non_truncated": 14147, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 3, + "has_chat_template": true, + "chat_type": "user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "b59fe4e4136b5cf01c1a49a0a4f49b2faaf7dfcc", + "model_dtype": "torch.float16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1462.3878571428572, - "min_seq_length": 1439, - "max_seq_length": 1713, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1390.764464692483, - "min_seq_length": 1124, - "max_seq_length": 1893, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1451.7455065359477, + "min_seq_length": 1428, + "max_seq_length": 1518, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1675.7455065359477, + "min_seq_length": 1652, + "max_seq_length": 1742, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 1, + "non_truncated": 718, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 1, + "mean_seq_length": 1744.9262865090404, + "min_seq_length": 1368, + "max_seq_length": 2545, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998609179415855 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1645.039188243527, + "min_seq_length": 1379, + "max_seq_length": 2643, + 
"max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1691.9876923076922, + "min_seq_length": 1636, + "max_seq_length": 1812, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1462.3878571428572, + "min_seq_length": 1439, + "max_seq_length": 1713, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1390.764464692483, + "min_seq_length": 1124, + "max_seq_length": 1893, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1963.3360752056403, + "min_seq_length": 1928, + "max_seq_length": 2002, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1881.2492537313433, + "min_seq_length": 1860, + "max_seq_length": 1976, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1963.3360752056403, - "min_seq_length": 1928, - "max_seq_length": 2002, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Walmart-the-bag/Misted-v2-7B,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1881.2492537313433, - "min_seq_length": 1860, - "max_seq_length": 1976, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Walmart-the-bag/Misted-v2-7B,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - 
"gen_kwargs": null - }, - "git_hash": "0e4d6ae" + "git_hash": "0e4d6ae" } \ No newline at end of file diff --git a/Walmart-the-bag/Misted-v2-7B/results_2024-04-18T23-28-15.566455.json b/Walmart-the-bag/Misted-v2-7B/results_2024-04-18T23-28-15.566455.json index 93295f58cff99c640f7d0570a6fef61eb52d9d0d..9d374ea1720428b4667deeb62f8ccaed1236d34d 100644 --- a/Walmart-the-bag/Misted-v2-7B/results_2024-04-18T23-28-15.566455.json +++ b/Walmart-the-bag/Misted-v2-7B/results_2024-04-18T23-28-15.566455.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6599908104449282, - "all_grouped_npm": 0.49516354785833655, + "all_grouped_average": 0.6789097248622188, + "all_grouped_npm": 0.523316694312638, "all_grouped": { "enem_challenge": 0.629111266620014, "bluex": 0.5382475660639777, @@ -45,7 +45,7 @@ "faquad_nli": 0.6589071710738363, "hatebr_offensive": 0.8156351971251167, "portuguese_hate_speech": 0.7150363835404727, - "tweetsentbr": 0.5108106892668447 + "tweetsentbr": 0.6810809190224597 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.629111266620014, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.6589071710738363, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8156351971251167, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7150363835404727, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5108106892668447 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6810809190224597 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.629111266620014, @@ -150,9 +150,9 @@ "main_score": 0.7150363835404727 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5108106892668447, + "f1_macro,all": 0.6810809190224597, "acc,all": 0.6975124378109453, - "main_score": 0.5108106892668447 + "main_score": 0.6810809190224597 } }, "config_tasks": { diff --git a/Walmart-the-bag/Quintellect-10.7B/raw_2024-04-22T07-23-08.500560/results.json b/Walmart-the-bag/Quintellect-10.7B/raw_2024-04-22T07-23-08.500560/results.json index a79174f76891d6b1bce77b376ed51a4172fcdf5f..572ca35644504920d71db754a6f34f9bdc0d7fe8 100644 --- a/Walmart-the-bag/Quintellect-10.7B/raw_2024-04-22T07-23-08.500560/results.json +++ b/Walmart-the-bag/Quintellect-10.7B/raw_2024-04-22T07-23-08.500560/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9170701677123623, - "acc,all": 0.9170751633986928, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7627230529499934, - "mse,all": 0.48967728758169937, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.49235048678720444, - "acc,exam_id__USP_2024": 0.5853658536585366, - "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, - "acc,exam_id__UNICAMP_2021_2": 0.49019607843137253, - "acc,exam_id__UNICAMP_2023": 0.46511627906976744, - "acc,exam_id__USP_2018": 0.48148148148148145, - "acc,exam_id__UNICAMP_2024": 0.4, - "acc,exam_id__UNICAMP_2018": 0.4074074074074074, - "acc,exam_id__UNICAMP_2020": 0.5454545454545454, - "acc,exam_id__USP_2021": 0.4807692307692308, - "acc,exam_id__USP_2022": 0.5306122448979592, - "acc,exam_id__UNICAMP_2022": 0.48717948717948717, - "acc,exam_id__USP_2020": 0.48214285714285715, - "acc,exam_id__USP_2019": 0.375, - "acc,exam_id__USP_2023": 0.5909090909090909, - "acc,exam_id__UNICAMP_2019": 0.54, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6277116864940517, - "acc,exam_id__2016_2": 0.6178861788617886, - "acc,exam_id__2013": 0.6481481481481481, - "acc,exam_id__2017": 0.6293103448275862, - 
"acc,exam_id__2009": 0.6173913043478261, - "acc,exam_id__2023": 0.6666666666666666, - "acc,exam_id__2015": 0.5882352941176471, - "acc,exam_id__2010": 0.5897435897435898, - "acc,exam_id__2011": 0.7008547008547008, - "acc,exam_id__2022": 0.5714285714285714, - "acc,exam_id__2016": 0.628099173553719, - "acc,exam_id__2012": 0.6293103448275862, - "acc,exam_id__2014": 0.6513761467889908 - }, - "faquad_nli": { - "f1_macro,all": 0.7349043532671851, - "acc,all": 0.7753846153846153, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.735005055611729, - "acc,all": 0.7492857142857143 - }, - "oab_exams": { - "acc,all": 0.4123006833712984, - "acc,exam_id__2011-05": 0.4375, - "acc,exam_id__2013-12": 0.425, - "acc,exam_id__2012-07": 0.4375, - "acc,exam_id__2016-21": 0.375, - "acc,exam_id__2012-06a": 0.325, - "acc,exam_id__2015-18": 0.4, - "acc,exam_id__2017-22": 0.525, - "acc,exam_id__2014-13": 0.4, - "acc,exam_id__2011-04": 0.35, - "acc,exam_id__2016-20": 0.4375, - "acc,exam_id__2011-03": 0.29292929292929293, - "acc,exam_id__2014-15": 0.5, - "acc,exam_id__2015-16": 0.4125, - "acc,exam_id__2010-01": 0.3411764705882353, - "acc,exam_id__2012-06": 0.4875, - "acc,exam_id__2012-08": 0.35, - "acc,exam_id__2013-10": 0.4375, - "acc,exam_id__2014-14": 0.5125, - "acc,exam_id__2017-23": 0.425, - "acc,exam_id__2015-17": 0.44871794871794873, - "acc,exam_id__2016-19": 0.5, - "acc,exam_id__2016-20a": 0.35, - "acc,exam_id__2013-11": 0.3875, - "acc,exam_id__2010-02": 0.43, - "acc,exam_id__2017-24": 0.35, - "acc,exam_id__2012-09": 0.38961038961038963, - "acc,exam_id__2018-25": 0.4375, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6935314304170537, - "acc,all": 0.7379553466509988 - }, - "tweetsentbr": { - "f1_macro,all": 0.4997825108073116, - "acc,all": 0.690547263681592, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9170701677123623, + "acc,all": 0.9170751633986928, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7627230529499934, + "mse,all": 0.48967728758169937, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.49235048678720444, + "acc,exam_id__USP_2024": 0.5853658536585366, + "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, + "acc,exam_id__UNICAMP_2021_2": 0.49019607843137253, + "acc,exam_id__UNICAMP_2023": 0.46511627906976744, + "acc,exam_id__USP_2018": 0.48148148148148145, + "acc,exam_id__UNICAMP_2024": 0.4, + "acc,exam_id__UNICAMP_2018": 0.4074074074074074, + "acc,exam_id__UNICAMP_2020": 0.5454545454545454, + "acc,exam_id__USP_2021": 0.4807692307692308, + "acc,exam_id__USP_2022": 0.5306122448979592, + "acc,exam_id__UNICAMP_2022": 0.48717948717948717, + "acc,exam_id__USP_2020": 0.48214285714285715, + "acc,exam_id__USP_2019": 0.375, + "acc,exam_id__USP_2023": 0.5909090909090909, + "acc,exam_id__UNICAMP_2019": 0.54, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6277116864940517, + "acc,exam_id__2016_2": 0.6178861788617886, + "acc,exam_id__2013": 0.6481481481481481, + "acc,exam_id__2017": 0.6293103448275862, + "acc,exam_id__2009": 0.6173913043478261, + "acc,exam_id__2023": 0.6666666666666666, + "acc,exam_id__2015": 0.5882352941176471, + "acc,exam_id__2010": 0.5897435897435898, + "acc,exam_id__2011": 0.7008547008547008, + "acc,exam_id__2022": 0.5714285714285714, + "acc,exam_id__2016": 0.628099173553719, + "acc,exam_id__2012": 0.6293103448275862, + "acc,exam_id__2014": 0.6513761467889908 + }, + "faquad_nli": { + "f1_macro,all": 0.7349043532671851, + "acc,all": 0.7753846153846153, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.735005055611729, + "acc,all": 0.7492857142857143 + }, + "oab_exams": { + "acc,all": 0.4123006833712984, + "acc,exam_id__2011-05": 0.4375, + "acc,exam_id__2013-12": 0.425, + "acc,exam_id__2012-07": 0.4375, + "acc,exam_id__2016-21": 0.375, + "acc,exam_id__2012-06a": 0.325, + "acc,exam_id__2015-18": 0.4, + "acc,exam_id__2017-22": 0.525, + "acc,exam_id__2014-13": 0.4, + "acc,exam_id__2011-04": 0.35, + "acc,exam_id__2016-20": 0.4375, + "acc,exam_id__2011-03": 0.29292929292929293, + "acc,exam_id__2014-15": 0.5, + "acc,exam_id__2015-16": 0.4125, + "acc,exam_id__2010-01": 0.3411764705882353, + "acc,exam_id__2012-06": 0.4875, + "acc,exam_id__2012-08": 0.35, + "acc,exam_id__2013-10": 0.4375, + "acc,exam_id__2014-14": 0.5125, + "acc,exam_id__2017-23": 0.425, + "acc,exam_id__2015-17": 0.44871794871794873, + "acc,exam_id__2016-19": 0.5, + "acc,exam_id__2016-20a": 0.35, + 
"acc,exam_id__2013-11": 0.3875, + "acc,exam_id__2010-02": 0.43, + "acc,exam_id__2017-24": 0.35, + "acc,exam_id__2012-09": 0.38961038961038963, + "acc,exam_id__2018-25": 0.4375, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6935314304170537, + "acc,all": 0.7379553466509988 + }, + "tweetsentbr": { + "f1_macro,all": 0.6663766810764157, + "acc,all": 0.690547263681592, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 3, - "non_truncated": 14147, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 3, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "a49c47e1ec1244761cab725763c20d1602064154", - "model_dtype": "torch.float16", - "model_memory_footprint": 22268366848, - "model_num_parameters": 10731524096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 4, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1479.7455065359477, - "min_seq_length": 1456, - "max_seq_length": 1546, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1719.7455065359477, - "min_seq_length": 1696, - "max_seq_length": 1786, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 1, - "non_truncated": 718, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 1, - "mean_seq_length": 1749.9262865090404, - "min_seq_length": 1373, - "max_seq_length": 2550, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998609179415855 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1650.039188243527, - "min_seq_length": 1384, - "max_seq_length": 2648, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1720.9876923076922, - "min_seq_length": 1665, - "max_seq_length": 1841, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 3, + "non_truncated": 14147, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 3, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "a49c47e1ec1244761cab725763c20d1602064154", + "model_dtype": "torch.float16", + "model_memory_footprint": 22268366848, + "model_num_parameters": 10731524096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 4, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1511.3878571428572, - "min_seq_length": 1488, - "max_seq_length": 1762, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1395.764464692483, - "min_seq_length": 1129, - "max_seq_length": 1898, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1479.7455065359477, + "min_seq_length": 1456, + "max_seq_length": 1546, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1719.7455065359477, + "min_seq_length": 1696, + "max_seq_length": 1786, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 1, + "non_truncated": 718, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 1, + "mean_seq_length": 1749.9262865090404, + "min_seq_length": 1373, + "max_seq_length": 2550, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998609179415855 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1650.039188243527, + "min_seq_length": 1384, + "max_seq_length": 
2648, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1720.9876923076922, + "min_seq_length": 1665, + "max_seq_length": 1841, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1511.3878571428572, + "min_seq_length": 1488, + "max_seq_length": 1762, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1395.764464692483, + "min_seq_length": 1129, + "max_seq_length": 1898, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2012.3360752056403, + "min_seq_length": 1977, + "max_seq_length": 2051, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1758.2492537313433, + "min_seq_length": 1737, + "max_seq_length": 1853, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2012.3360752056403, - "min_seq_length": 1977, - "max_seq_length": 2051, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Walmart-the-bag/Quintellect-10.7B,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1758.2492537313433, - "min_seq_length": 1737, - "max_seq_length": 1853, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Walmart-the-bag/Quintellect-10.7B,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - 
"bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "0e4d6ae" + "git_hash": "0e4d6ae" } \ No newline at end of file diff --git a/Walmart-the-bag/Quintellect-10.7B/results_2024-04-22T07-23-08.500560.json b/Walmart-the-bag/Quintellect-10.7B/results_2024-04-22T07-23-08.500560.json index 833497adf51684f23ddc67cb4e5487f6c0d31050..882842981f9b7d5c726860e7707625e1c999ebe4 100644 --- a/Walmart-the-bag/Quintellect-10.7B/results_2024-04-22T07-23-08.500560.json +++ b/Walmart-the-bag/Quintellect-10.7B/results_2024-04-22T07-23-08.500560.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6528199363797988, - "all_grouped_npm": 0.4825523923955195, + "all_grouped_average": 0.6713303997430325, + "all_grouped_npm": 0.5100977247812841, "all_grouped": { "enem_challenge": 0.6277116864940517, "bluex": 0.49235048678720444, @@ -45,7 +45,7 @@ "faquad_nli": 0.7349043532671851, "hatebr_offensive": 0.735005055611729, "portuguese_hate_speech": 0.6935314304170537, - "tweetsentbr": 0.4997825108073116 + "tweetsentbr": 0.6663766810764157 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6277116864940517, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7349043532671851, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.735005055611729, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6935314304170537, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4997825108073116 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6663766810764157 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6277116864940517, @@ -150,9 +150,9 @@ "main_score": 0.6935314304170537 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4997825108073116, + "f1_macro,all": 0.6663766810764157, "acc,all": 0.690547263681592, - "main_score": 0.4997825108073116 + "main_score": 0.6663766810764157 } }, "config_tasks": { diff --git a/Weni/WeniGPT-2.4.1-Zephyr-7B-3-epochs-GPT-QA-1.0.1_DP_DPO/raw_2024-04-14T03-35-32.685055/results.json b/Weni/WeniGPT-2.4.1-Zephyr-7B-3-epochs-GPT-QA-1.0.1_DP_DPO/raw_2024-04-14T03-35-32.685055/results.json index c8678b9540bda5488dff068ca5ac9a3aba358673..a77711ecfcf84d48938a4c64aca9046476741d8a 100644 --- a/Weni/WeniGPT-2.4.1-Zephyr-7B-3-epochs-GPT-QA-1.0.1_DP_DPO/raw_2024-04-14T03-35-32.685055/results.json +++ b/Weni/WeniGPT-2.4.1-Zephyr-7B-3-epochs-GPT-QA-1.0.1_DP_DPO/raw_2024-04-14T03-35-32.685055/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.8844680081937584, - "acc,all": 0.8848039215686274, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.6873093329646505, - "mse,all": 0.6536029411764707, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.47426981919332406, - "acc,exam_id__USP_2019": 0.4, - "acc,exam_id__USP_2023": 0.5681818181818182, - "acc,exam_id__UNICAMP_2023": 0.37209302325581395, - "acc,exam_id__USP_2021": 0.40384615384615385, - "acc,exam_id__USP_2024": 0.6829268292682927, - "acc,exam_id__UNICAMP_2019": 0.52, - "acc,exam_id__UNICAMP_2021_2": 0.37254901960784315, - "acc,exam_id__UNICAMP_2022": 0.5384615384615384, - "acc,exam_id__UNICAMP_2021_1": 0.391304347826087, - "acc,exam_id__USP_2018": 0.46296296296296297, - "acc,exam_id__UNICAMP_2024": 0.4888888888888889, - "acc,exam_id__USP_2020": 0.48214285714285715, - "acc,exam_id__USP_2022": 0.5714285714285714, - "acc,exam_id__UNICAMP_2020": 0.4727272727272727, - "acc,exam_id__UNICAMP_2018": 0.42592592592592593, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 
0.562631210636809, - "acc,exam_id__2017": 0.5344827586206896, - "acc,exam_id__2009": 0.5565217391304348, - "acc,exam_id__2014": 0.5688073394495413, - "acc,exam_id__2012": 0.5948275862068966, - "acc,exam_id__2010": 0.5128205128205128, - "acc,exam_id__2022": 0.5112781954887218, - "acc,exam_id__2016": 0.5454545454545454, - "acc,exam_id__2016_2": 0.5365853658536586, - "acc,exam_id__2011": 0.6752136752136753, - "acc,exam_id__2013": 0.6018518518518519, - "acc,exam_id__2023": 0.5555555555555556, - "acc,exam_id__2015": 0.5714285714285714 - }, - "faquad_nli": { - "f1_macro,all": 0.6130518712248583, - "acc,all": 0.6323076923076923, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8071243382571215, - "acc,all": 0.8107142857142857 - }, - "oab_exams": { - "acc,all": 0.38223234624145785, - "acc,exam_id__2016-20a": 0.2125, - "acc,exam_id__2014-15": 0.5, - "acc,exam_id__2010-01": 0.38823529411764707, - "acc,exam_id__2013-12": 0.425, - "acc,exam_id__2012-06a": 0.3875, - "acc,exam_id__2017-22": 0.425, - "acc,exam_id__2017-23": 0.4, - "acc,exam_id__2014-13": 0.4, - "acc,exam_id__2015-18": 0.4, - "acc,exam_id__2012-08": 0.4125, - "acc,exam_id__2012-09": 0.35064935064935066, - "acc,exam_id__2015-17": 0.4230769230769231, - "acc,exam_id__2018-25": 0.3625, - "acc,exam_id__2011-03": 0.32323232323232326, - "acc,exam_id__2016-19": 0.47435897435897434, - "acc,exam_id__2011-05": 0.4, - "acc,exam_id__2013-10": 0.325, - "acc,exam_id__2016-21": 0.4125, - "acc,exam_id__2011-04": 0.3375, - "acc,exam_id__2016-20": 0.375, - "acc,exam_id__2010-02": 0.45, - "acc,exam_id__2012-06": 0.375, - "acc,exam_id__2017-24": 0.3125, - "acc,exam_id__2012-07": 0.325, - "acc,exam_id__2015-16": 0.3875, - "acc,exam_id__2013-11": 0.4, - "acc,exam_id__2014-14": 0.3375, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6608190340441862, - "acc,all": 0.6839012925969448 - }, - "tweetsentbr": { - "f1_macro,all": 0.4753204671585061, - "acc,all": 0.681592039800995, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.8844680081937584, + "acc,all": 0.8848039215686274, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.6873093329646505, + "mse,all": 0.6536029411764707, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.47426981919332406, + "acc,exam_id__USP_2019": 0.4, + "acc,exam_id__USP_2023": 0.5681818181818182, + "acc,exam_id__UNICAMP_2023": 0.37209302325581395, + "acc,exam_id__USP_2021": 0.40384615384615385, + "acc,exam_id__USP_2024": 0.6829268292682927, + "acc,exam_id__UNICAMP_2019": 0.52, + "acc,exam_id__UNICAMP_2021_2": 0.37254901960784315, + "acc,exam_id__UNICAMP_2022": 0.5384615384615384, + "acc,exam_id__UNICAMP_2021_1": 0.391304347826087, + "acc,exam_id__USP_2018": 0.46296296296296297, + "acc,exam_id__UNICAMP_2024": 0.4888888888888889, + "acc,exam_id__USP_2020": 0.48214285714285715, + "acc,exam_id__USP_2022": 0.5714285714285714, + "acc,exam_id__UNICAMP_2020": 0.4727272727272727, + "acc,exam_id__UNICAMP_2018": 0.42592592592592593, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.562631210636809, + "acc,exam_id__2017": 0.5344827586206896, + "acc,exam_id__2009": 0.5565217391304348, + "acc,exam_id__2014": 0.5688073394495413, + "acc,exam_id__2012": 0.5948275862068966, + "acc,exam_id__2010": 0.5128205128205128, + "acc,exam_id__2022": 0.5112781954887218, + "acc,exam_id__2016": 0.5454545454545454, + "acc,exam_id__2016_2": 0.5365853658536586, + "acc,exam_id__2011": 0.6752136752136753, + "acc,exam_id__2013": 0.6018518518518519, + "acc,exam_id__2023": 0.5555555555555556, + "acc,exam_id__2015": 0.5714285714285714 + }, + "faquad_nli": { + "f1_macro,all": 0.6130518712248583, + "acc,all": 0.6323076923076923, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8071243382571215, + "acc,all": 0.8107142857142857 + }, + "oab_exams": { + "acc,all": 0.38223234624145785, + "acc,exam_id__2016-20a": 0.2125, + "acc,exam_id__2014-15": 0.5, + "acc,exam_id__2010-01": 0.38823529411764707, + "acc,exam_id__2013-12": 0.425, + "acc,exam_id__2012-06a": 0.3875, + "acc,exam_id__2017-22": 0.425, + "acc,exam_id__2017-23": 0.4, + "acc,exam_id__2014-13": 0.4, + "acc,exam_id__2015-18": 0.4, + "acc,exam_id__2012-08": 0.4125, + "acc,exam_id__2012-09": 0.35064935064935066, + "acc,exam_id__2015-17": 0.4230769230769231, + "acc,exam_id__2018-25": 0.3625, + "acc,exam_id__2011-03": 0.32323232323232326, + "acc,exam_id__2016-19": 0.47435897435897434, + "acc,exam_id__2011-05": 0.4, + "acc,exam_id__2013-10": 0.325, + "acc,exam_id__2016-21": 0.4125, + "acc,exam_id__2011-04": 0.3375, + "acc,exam_id__2016-20": 0.375, + "acc,exam_id__2010-02": 0.45, + 
"acc,exam_id__2012-06": 0.375, + "acc,exam_id__2017-24": 0.3125, + "acc,exam_id__2012-07": 0.325, + "acc,exam_id__2015-16": 0.3875, + "acc,exam_id__2013-11": 0.4, + "acc,exam_id__2014-14": 0.3375, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6608190340441862, + "acc,all": 0.6839012925969448 + }, + "tweetsentbr": { + "f1_macro,all": 0.633760622878008, + "acc,all": 0.681592039800995, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"8111881006d145806bba6b72dbb51aa63a1e1242", - "model_dtype": "torch.float16", - "model_memory_footprint": 15033974784, - "model_num_parameters": 7248547840, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:6", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1646.7455065359477, - "min_seq_length": 1623, - "max_seq_length": 1713, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1855.7455065359477, - "min_seq_length": 1832, - "max_seq_length": 1922, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1792.9262865090404, - "min_seq_length": 1416, - "max_seq_length": 2593, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - "mean_seq_length": 1693.039188243527, - "min_seq_length": 1427, - "max_seq_length": 2691, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1871.9876923076922, - "min_seq_length": 1816, - "max_seq_length": 1992, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1752.3878571428572, - "min_seq_length": 1729, - "max_seq_length": 2003, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "8111881006d145806bba6b72dbb51aa63a1e1242", + "model_dtype": "torch.float16", + "model_memory_footprint": 15033974784, + "model_num_parameters": 7248547840, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, 
+ "model_is_quantized": null, + "model_device": "cuda:6", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1438.764464692483, - "min_seq_length": 1172, - "max_seq_length": 1941, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1646.7455065359477, + "min_seq_length": 1623, + "max_seq_length": 1713, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1855.7455065359477, + "min_seq_length": 1832, + "max_seq_length": 1922, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1792.9262865090404, + "min_seq_length": 1416, + "max_seq_length": 2593, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1693.039188243527, + "min_seq_length": 1427, + "max_seq_length": 2691, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1871.9876923076922, + "min_seq_length": 1816, + "max_seq_length": 1992, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1752.3878571428572, + "min_seq_length": 1729, + "max_seq_length": 2003, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1438.764464692483, + "min_seq_length": 1172, + "max_seq_length": 1941, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2253.3360752056406, + "min_seq_length": 2218, + "max_seq_length": 2292, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + 
"sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1999.2492537313433, + "min_seq_length": 1978, + "max_seq_length": 2094, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2253.3360752056406, - "min_seq_length": 2218, - "max_seq_length": 2292, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Weni/WeniGPT-2.2.3-Zephyr-7B-LLM_Base_2.0.3_SFT,peft=Weni/WeniGPT-2.4.1-Zephyr-7B-3-epochs-GPT-QA-1.0.1_DP_DPO,dtype=float16,device=cuda:6,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1999.2492537313433, - "min_seq_length": 1978, - "max_seq_length": 2094, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Weni/WeniGPT-2.2.3-Zephyr-7B-LLM_Base_2.0.3_SFT,peft=Weni/WeniGPT-2.4.1-Zephyr-7B-3-epochs-GPT-QA-1.0.1_DP_DPO,dtype=float16,device=cuda:6,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "1158bba" + "git_hash": "1158bba" } \ No newline at end of file diff --git a/Weni/WeniGPT-2.4.1-Zephyr-7B-3-epochs-GPT-QA-1.0.1_DP_DPO/results_2024-04-14T03-35-32.685055.json b/Weni/WeniGPT-2.4.1-Zephyr-7B-3-epochs-GPT-QA-1.0.1_DP_DPO/results_2024-04-14T03-35-32.685055.json index 1e36fa530f16a246232d9d018474b1a529c34f84..f16143986a8123d66d0e03fdce34622cb91825db 100644 --- a/Weni/WeniGPT-2.4.1-Zephyr-7B-3-epochs-GPT-QA-1.0.1_DP_DPO/results_2024-04-14T03-35-32.685055.json +++ b/Weni/WeniGPT-2.4.1-Zephyr-7B-3-epochs-GPT-QA-1.0.1_DP_DPO/results_2024-04-14T03-35-32.685055.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6163584919905192, - "all_grouped_npm": 0.43095970670675215, + "all_grouped_average": 0.6339629537371305, + "all_grouped_npm": 0.45715682240111427, "all_grouped": { "enem_challenge": 0.562631210636809, "bluex": 0.47426981919332406, @@ -45,7 +45,7 @@ "faquad_nli": 0.6130518712248583, "hatebr_offensive": 0.8071243382571215, "portuguese_hate_speech": 0.6608190340441862, - "tweetsentbr": 0.4753204671585061 + "tweetsentbr": 0.633760622878008 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.562631210636809, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.6130518712248583, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8071243382571215, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6608190340441862, - 
"harness|tweetsentbr|tweetsentbr|None|25": 0.4753204671585061 + "harness|tweetsentbr|tweetsentbr|None|25": 0.633760622878008 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.562631210636809, @@ -150,9 +150,9 @@ "main_score": 0.6608190340441862 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4753204671585061, + "f1_macro,all": 0.633760622878008, "acc,all": 0.681592039800995, - "main_score": 0.4753204671585061 + "main_score": 0.633760622878008 } }, "config_tasks": { diff --git a/Weni/WeniGPT-Mistral-7B-instructBase-4bit/raw_2024-04-14T01-06-41.169623/results.json b/Weni/WeniGPT-Mistral-7B-instructBase-4bit/raw_2024-04-14T01-06-41.169623/results.json index 900569435ed4e3806f75961c0ae76d16ef31a25f..1807c9a281cac4031c0b69a7aa78621907b53b2f 100644 --- a/Weni/WeniGPT-Mistral-7B-instructBase-4bit/raw_2024-04-14T01-06-41.169623/results.json +++ b/Weni/WeniGPT-Mistral-7B-instructBase-4bit/raw_2024-04-14T01-06-41.169623/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.7465864435011207, - "acc,all": 0.7561274509803921, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.5521492658011754, - "mse,all": 1.1086478758169935, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.24478442280945759, - "acc,exam_id__USP_2018": 0.25925925925925924, - "acc,exam_id__USP_2019": 0.275, - "acc,exam_id__USP_2023": 0.20454545454545456, - "acc,exam_id__USP_2022": 0.20408163265306123, - "acc,exam_id__UNICAMP_2023": 0.13953488372093023, - "acc,exam_id__UNICAMP_2018": 0.2777777777777778, - "acc,exam_id__UNICAMP_2019": 0.24, - "acc,exam_id__UNICAMP_2024": 0.28888888888888886, - "acc,exam_id__UNICAMP_2021_1": 0.2391304347826087, - "acc,exam_id__UNICAMP_2021_2": 0.27450980392156865, - "acc,exam_id__USP_2020": 0.32142857142857145, - "acc,exam_id__UNICAMP_2022": 0.3076923076923077, - "acc,exam_id__UNICAMP_2020": 0.2, - "acc,exam_id__USP_2024": 0.2926829268292683, - "acc,exam_id__USP_2021": 0.15384615384615385, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.2918124562631211, - "acc,exam_id__2011": 0.2564102564102564, - "acc,exam_id__2015": 0.31932773109243695, - "acc,exam_id__2017": 0.27586206896551724, - "acc,exam_id__2023": 0.26666666666666666, - "acc,exam_id__2016_2": 0.2764227642276423, - "acc,exam_id__2022": 0.3082706766917293, - "acc,exam_id__2009": 0.30434782608695654, - "acc,exam_id__2010": 0.358974358974359, - "acc,exam_id__2016": 0.2809917355371901, - "acc,exam_id__2014": 0.26605504587155965, - "acc,exam_id__2012": 0.29310344827586204, - "acc,exam_id__2013": 0.2962962962962963 - }, - "faquad_nli": { - "f1_macro,all": 0.21574526524224713, - "acc,all": 0.3292307692307692, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.4601938431322317, - "acc,all": 0.7028571428571428 - }, - "oab_exams": { - "acc,all": 0.2979498861047836, - "acc,exam_id__2013-11": 0.3625, - "acc,exam_id__2016-19": 0.21794871794871795, - "acc,exam_id__2016-20a": 0.25, - "acc,exam_id__2011-03": 0.3333333333333333, - "acc,exam_id__2016-20": 0.3375, - "acc,exam_id__2018-25": 0.2875, - "acc,exam_id__2015-17": 0.38461538461538464, - "acc,exam_id__2017-24": 0.275, - "acc,exam_id__2012-06a": 0.3625, - "acc,exam_id__2013-10": 0.35, - "acc,exam_id__2013-12": 0.325, - "acc,exam_id__2012-06": 0.3375, - "acc,exam_id__2014-15": 0.21794871794871795, - "acc,exam_id__2017-22": 0.325, - "acc,exam_id__2011-04": 0.2875, - "acc,exam_id__2016-21": 0.25, - "acc,exam_id__2012-07": 0.25, - 
"acc,exam_id__2012-08": 0.3, - "acc,exam_id__2015-18": 0.225, - "acc,exam_id__2015-16": 0.3125, - "acc,exam_id__2012-09": 0.23376623376623376, - "acc,exam_id__2010-01": 0.32941176470588235, - "acc,exam_id__2011-05": 0.3625, - "acc,exam_id__2010-02": 0.29, - "acc,exam_id__2014-14": 0.175, - "acc,exam_id__2017-23": 0.275, - "acc,exam_id__2014-13": 0.375, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.5682103706200091, - "acc,all": 0.681551116333725 - }, - "tweetsentbr": { - "f1_macro,all": 0.4155725330795492, - "acc,all": 0.5830845771144278, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.7465864435011207, + "acc,all": 0.7561274509803921, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.5521492658011754, + "mse,all": 1.1086478758169935, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.24478442280945759, + "acc,exam_id__USP_2018": 0.25925925925925924, + "acc,exam_id__USP_2019": 0.275, + "acc,exam_id__USP_2023": 0.20454545454545456, + "acc,exam_id__USP_2022": 0.20408163265306123, + "acc,exam_id__UNICAMP_2023": 0.13953488372093023, + "acc,exam_id__UNICAMP_2018": 0.2777777777777778, + "acc,exam_id__UNICAMP_2019": 0.24, + "acc,exam_id__UNICAMP_2024": 0.28888888888888886, + "acc,exam_id__UNICAMP_2021_1": 0.2391304347826087, + "acc,exam_id__UNICAMP_2021_2": 0.27450980392156865, + "acc,exam_id__USP_2020": 0.32142857142857145, + "acc,exam_id__UNICAMP_2022": 0.3076923076923077, + "acc,exam_id__UNICAMP_2020": 0.2, + "acc,exam_id__USP_2024": 0.2926829268292683, + "acc,exam_id__USP_2021": 0.15384615384615385, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.2918124562631211, + "acc,exam_id__2011": 0.2564102564102564, + "acc,exam_id__2015": 0.31932773109243695, + "acc,exam_id__2017": 0.27586206896551724, + "acc,exam_id__2023": 0.26666666666666666, + "acc,exam_id__2016_2": 0.2764227642276423, + "acc,exam_id__2022": 0.3082706766917293, + "acc,exam_id__2009": 0.30434782608695654, + "acc,exam_id__2010": 0.358974358974359, + "acc,exam_id__2016": 0.2809917355371901, + "acc,exam_id__2014": 0.26605504587155965, + "acc,exam_id__2012": 0.29310344827586204, + 
"acc,exam_id__2013": 0.2962962962962963 + }, + "faquad_nli": { + "f1_macro,all": 0.3236178978633707, + "acc,all": 0.3292307692307692, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.6902907646983476, + "acc,all": 0.7028571428571428 + }, + "oab_exams": { + "acc,all": 0.2979498861047836, + "acc,exam_id__2013-11": 0.3625, + "acc,exam_id__2016-19": 0.21794871794871795, + "acc,exam_id__2016-20a": 0.25, + "acc,exam_id__2011-03": 0.3333333333333333, + "acc,exam_id__2016-20": 0.3375, + "acc,exam_id__2018-25": 0.2875, + "acc,exam_id__2015-17": 0.38461538461538464, + "acc,exam_id__2017-24": 0.275, + "acc,exam_id__2012-06a": 0.3625, + "acc,exam_id__2013-10": 0.35, + "acc,exam_id__2013-12": 0.325, + "acc,exam_id__2012-06": 0.3375, + "acc,exam_id__2014-15": 0.21794871794871795, + "acc,exam_id__2017-22": 0.325, + "acc,exam_id__2011-04": 0.2875, + "acc,exam_id__2016-21": 0.25, + "acc,exam_id__2012-07": 0.25, + "acc,exam_id__2012-08": 0.3, + "acc,exam_id__2015-18": 0.225, + "acc,exam_id__2015-16": 0.3125, + "acc,exam_id__2012-09": 0.23376623376623376, + "acc,exam_id__2010-01": 0.32941176470588235, + "acc,exam_id__2011-05": 0.3625, + "acc,exam_id__2010-02": 0.29, + "acc,exam_id__2014-14": 0.175, + "acc,exam_id__2017-23": 0.275, + "acc,exam_id__2014-13": 0.375, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.5682103706200091, + "acc,all": 0.681551116333725 + }, + "tweetsentbr": { + "f1_macro,all": 0.5540967107727324, + "acc,all": 0.5830845771144278, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 3, - "non_truncated": 14147, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 3, - "has_chat_template": true, - "chat_type": "user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"0fad0e1fe07cca0b4e6d7d31cfe411725dee49f8", - "model_dtype": "torch.float16", - "model_memory_footprint": 15074869248, - "model_num_parameters": 7268995072, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:2", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1466.7455065359477, - "min_seq_length": 1443, - "max_seq_length": 1533, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1690.7455065359477, - "min_seq_length": 1667, - "max_seq_length": 1757, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 1, - "non_truncated": 718, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 1, - "mean_seq_length": 1747.9262865090404, - "min_seq_length": 1371, - "max_seq_length": 2548, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998609179415855 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - "mean_seq_length": 1648.039188243527, - "min_seq_length": 1382, - "max_seq_length": 2646, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1706.9876923076922, - "min_seq_length": 1651, - "max_seq_length": 1827, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1487.3878571428572, - "min_seq_length": 1464, - "max_seq_length": 1738, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 3, + "non_truncated": 14147, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 3, + "has_chat_template": true, + "chat_type": "user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "0fad0e1fe07cca0b4e6d7d31cfe411725dee49f8", + "model_dtype": "torch.float16", + "model_memory_footprint": 15074869248, + "model_num_parameters": 7268995072, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + 
"model_is_quantized": null, + "model_device": "cuda:2", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1393.764464692483, - "min_seq_length": 1127, - "max_seq_length": 1896, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1466.7455065359477, + "min_seq_length": 1443, + "max_seq_length": 1533, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1690.7455065359477, + "min_seq_length": 1667, + "max_seq_length": 1757, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 1, + "non_truncated": 718, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 1, + "mean_seq_length": 1747.9262865090404, + "min_seq_length": 1371, + "max_seq_length": 2548, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998609179415855 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1648.039188243527, + "min_seq_length": 1382, + "max_seq_length": 2646, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1706.9876923076922, + "min_seq_length": 1651, + "max_seq_length": 1827, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1487.3878571428572, + "min_seq_length": 1464, + "max_seq_length": 1738, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1393.764464692483, + "min_seq_length": 1127, + "max_seq_length": 1896, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1988.3360752056403, + "min_seq_length": 1953, + "max_seq_length": 2027, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + 
"sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1734.2492537313433, + "min_seq_length": 1713, + "max_seq_length": 1829, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1988.3360752056403, - "min_seq_length": 1953, - "max_seq_length": 2027, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Weni/WeniGPT-Mistral-7B-instructBase-4bit,dtype=float16,device=cuda:2,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1734.2492537313433, - "min_seq_length": 1713, - "max_seq_length": 1829, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Weni/WeniGPT-Mistral-7B-instructBase-4bit,dtype=float16,device=cuda:2,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "1158bba" + "git_hash": "1158bba" } \ No newline at end of file diff --git a/Weni/WeniGPT-Mistral-7B-instructBase-4bit/results_2024-04-14T01-06-41.169623.json b/Weni/WeniGPT-Mistral-7B-instructBase-4bit/results_2024-04-14T01-06-41.169623.json index 5cc93ab3531d1e191d380afd3666ec668ec9c3f4..815d92777e0f2a3483538ab92567bef6c1706af5 100644 --- a/Weni/WeniGPT-Mistral-7B-instructBase-4bit/results_2024-04-14T01-06-41.169623.json +++ b/Weni/WeniGPT-Mistral-7B-instructBase-4bit/results_2024-04-14T01-06-41.169623.json @@ -34,18 +34,18 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.42144494295041063, - "all_grouped_npm": 0.11442639256572346, + "all_grouped_average": 0.47438869093712427, + "all_grouped_npm": 0.2104959806626181, "all_grouped": { "enem_challenge": 0.2918124562631211, "bluex": 0.24478442280945759, "oab_exams": 0.2979498861047836, "assin2_rte": 0.7465864435011207, "assin2_sts": 0.5521492658011754, - "faquad_nli": 0.21574526524224713, - "hatebr_offensive": 0.4601938431322317, + "faquad_nli": 0.3236178978633707, + "hatebr_offensive": 0.6902907646983476, "portuguese_hate_speech": 0.5682103706200091, - "tweetsentbr": 0.4155725330795492 + "tweetsentbr": 0.5540967107727324 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.2918124562631211, @@ -53,10 +53,10 @@ "harness|oab_exams|oab_exams|None|3": 0.2979498861047836, "harness|assin2_rte|assin2_rte|None|15": 0.7465864435011207, "harness|assin2_sts|assin2_sts|None|15": 0.5521492658011754, - "harness|faquad_nli|faquad_nli|None|15": 0.21574526524224713, - 
"harness|hatebr_offensive|hatebr_offensive|None|25": 0.4601938431322317, + "harness|faquad_nli|faquad_nli|None|15": 0.3236178978633707, + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.6902907646983476, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.5682103706200091, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4155725330795492 + "harness|tweetsentbr|tweetsentbr|None|25": 0.5540967107727324 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.2918124562631211, @@ -135,14 +135,14 @@ "main_score": 0.5521492658011754 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.21574526524224713, + "f1_macro,all": 0.3236178978633707, "acc,all": 0.3292307692307692, - "main_score": 0.21574526524224713 + "main_score": 0.3236178978633707 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.4601938431322317, + "f1_macro,all": 0.6902907646983476, "acc,all": 0.7028571428571428, - "main_score": 0.4601938431322317 + "main_score": 0.6902907646983476 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { "f1_macro,all": 0.5682103706200091, @@ -150,9 +150,9 @@ "main_score": 0.5682103706200091 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4155725330795492, + "f1_macro,all": 0.5540967107727324, "acc,all": 0.5830845771144278, - "main_score": 0.4155725330795492 + "main_score": 0.5540967107727324 } }, "config_tasks": { diff --git a/Weni/WeniGPT-Mistral-7B-instructBase/raw_2024-04-13T05-37-06.823274/results.json b/Weni/WeniGPT-Mistral-7B-instructBase/raw_2024-04-13T05-37-06.823274/results.json index 579c793cbebf456adfd10ef43667b53622a457a4..7af38f846e16ff472b7f4c5734025510fae9c790 100644 --- a/Weni/WeniGPT-Mistral-7B-instructBase/raw_2024-04-13T05-37-06.823274/results.json +++ b/Weni/WeniGPT-Mistral-7B-instructBase/raw_2024-04-13T05-37-06.823274/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.6142066914966826, - "acc,all": 0.6568627450980392, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.40022445504959214, - "mse,all": 1.297687934640523, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.23504867872044508, - "acc,exam_id__UNICAMP_2018": 0.25925925925925924, - "acc,exam_id__USP_2019": 0.175, - "acc,exam_id__UNICAMP_2021_2": 0.21568627450980393, - "acc,exam_id__UNICAMP_2022": 0.1794871794871795, - "acc,exam_id__USP_2021": 0.23076923076923078, - "acc,exam_id__UNICAMP_2024": 0.2222222222222222, - "acc,exam_id__USP_2022": 0.1836734693877551, - "acc,exam_id__UNICAMP_2020": 0.3090909090909091, - "acc,exam_id__USP_2024": 0.12195121951219512, - "acc,exam_id__USP_2018": 0.2222222222222222, - "acc,exam_id__USP_2023": 0.25, - "acc,exam_id__UNICAMP_2023": 0.3953488372093023, - "acc,exam_id__UNICAMP_2021_1": 0.1956521739130435, - "acc,exam_id__UNICAMP_2019": 0.28, - "acc,exam_id__USP_2020": 0.25, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.3261021693491952, - "acc,exam_id__2009": 0.3217391304347826, - "acc,exam_id__2011": 0.358974358974359, - "acc,exam_id__2010": 0.3504273504273504, - "acc,exam_id__2014": 0.3394495412844037, - "acc,exam_id__2016": 0.3305785123966942, - "acc,exam_id__2016_2": 0.3252032520325203, - "acc,exam_id__2013": 0.3425925925925926, - "acc,exam_id__2023": 0.26666666666666666, - "acc,exam_id__2022": 0.2932330827067669, - "acc,exam_id__2012": 0.33620689655172414, - "acc,exam_id__2017": 0.35344827586206895, - "acc,exam_id__2015": 0.31092436974789917 - }, - "faquad_nli": { - "f1_macro,all": 
0.344957407970794, - "acc,all": 0.3476923076923077, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.44162029377851314, - "acc,all": 0.6785714285714286 - }, - "oab_exams": { - "acc,all": 0.2883826879271071, - "acc,exam_id__2016-21": 0.2875, - "acc,exam_id__2011-05": 0.3125, - "acc,exam_id__2012-06": 0.3, - "acc,exam_id__2011-03": 0.26262626262626265, - "acc,exam_id__2016-20a": 0.3125, - "acc,exam_id__2017-22": 0.325, - "acc,exam_id__2014-14": 0.2375, - "acc,exam_id__2012-09": 0.23376623376623376, - "acc,exam_id__2014-13": 0.325, - "acc,exam_id__2018-25": 0.3125, - "acc,exam_id__2014-15": 0.21794871794871795, - "acc,exam_id__2012-08": 0.1875, - "acc,exam_id__2016-19": 0.34615384615384615, - "acc,exam_id__2017-23": 0.3, - "acc,exam_id__2010-01": 0.3058823529411765, - "acc,exam_id__2016-20": 0.2875, - "acc,exam_id__2013-12": 0.2625, - "acc,exam_id__2011-04": 0.25, - "acc,exam_id__2015-17": 0.2948717948717949, - "acc,exam_id__2013-11": 0.2625, - "acc,exam_id__2012-06a": 0.3625, - "acc,exam_id__2010-02": 0.31, - "acc,exam_id__2012-07": 0.2625, - "acc,exam_id__2015-18": 0.3375, - "acc,exam_id__2015-16": 0.2625, - "acc,exam_id__2017-24": 0.325, - "acc,exam_id__2013-10": 0.3, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.39714398155956604, - "acc,all": 0.6674500587544065 - }, - "tweetsentbr": { - "f1_macro,all": 0.5118077502764405, - "acc,all": 0.5940298507462687, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.6142066914966826, + "acc,all": 0.6568627450980392, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.40022445504959214, + "mse,all": 1.297687934640523, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.23504867872044508, + "acc,exam_id__UNICAMP_2018": 0.25925925925925924, + "acc,exam_id__USP_2019": 0.175, + "acc,exam_id__UNICAMP_2021_2": 0.21568627450980393, + "acc,exam_id__UNICAMP_2022": 0.1794871794871795, + "acc,exam_id__USP_2021": 0.23076923076923078, + "acc,exam_id__UNICAMP_2024": 0.2222222222222222, + "acc,exam_id__USP_2022": 0.1836734693877551, + "acc,exam_id__UNICAMP_2020": 0.3090909090909091, + "acc,exam_id__USP_2024": 0.12195121951219512, + "acc,exam_id__USP_2018": 0.2222222222222222, + "acc,exam_id__USP_2023": 0.25, + "acc,exam_id__UNICAMP_2023": 0.3953488372093023, + "acc,exam_id__UNICAMP_2021_1": 0.1956521739130435, + "acc,exam_id__UNICAMP_2019": 0.28, + "acc,exam_id__USP_2020": 0.25, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.3261021693491952, + "acc,exam_id__2009": 0.3217391304347826, + "acc,exam_id__2011": 0.358974358974359, + "acc,exam_id__2010": 0.3504273504273504, + "acc,exam_id__2014": 0.3394495412844037, + "acc,exam_id__2016": 0.3305785123966942, + "acc,exam_id__2016_2": 0.3252032520325203, + "acc,exam_id__2013": 0.3425925925925926, + "acc,exam_id__2023": 0.26666666666666666, + "acc,exam_id__2022": 0.2932330827067669, + "acc,exam_id__2012": 0.33620689655172414, + "acc,exam_id__2017": 0.35344827586206895, + "acc,exam_id__2015": 0.31092436974789917 + }, + "faquad_nli": { + "f1_macro,all": 0.344957407970794, + "acc,all": 0.3476923076923077, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.6624304406677697, + "acc,all": 0.6785714285714286 + }, + "oab_exams": { + "acc,all": 0.2883826879271071, + "acc,exam_id__2016-21": 0.2875, + "acc,exam_id__2011-05": 0.3125, + "acc,exam_id__2012-06": 0.3, + "acc,exam_id__2011-03": 0.26262626262626265, + "acc,exam_id__2016-20a": 0.3125, + "acc,exam_id__2017-22": 0.325, + "acc,exam_id__2014-14": 0.2375, + "acc,exam_id__2012-09": 0.23376623376623376, + "acc,exam_id__2014-13": 0.325, + "acc,exam_id__2018-25": 0.3125, + "acc,exam_id__2014-15": 0.21794871794871795, + "acc,exam_id__2012-08": 0.1875, + "acc,exam_id__2016-19": 0.34615384615384615, + "acc,exam_id__2017-23": 0.3, + "acc,exam_id__2010-01": 0.3058823529411765, + "acc,exam_id__2016-20": 0.2875, + "acc,exam_id__2013-12": 0.2625, + "acc,exam_id__2011-04": 0.25, + "acc,exam_id__2015-17": 0.2948717948717949, + "acc,exam_id__2013-11": 0.2625, + "acc,exam_id__2012-06a": 0.3625, + 
"acc,exam_id__2010-02": 0.31, + "acc,exam_id__2012-07": 0.2625, + "acc,exam_id__2015-18": 0.3375, + "acc,exam_id__2015-16": 0.2625, + "acc,exam_id__2017-24": 0.325, + "acc,exam_id__2013-10": 0.3, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.5957159723393489, + "acc,all": 0.6674500587544065 + }, + "tweetsentbr": { + "f1_macro,all": 0.5118077502764405, + "acc,all": 0.5940298507462687, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 3, - "non_truncated": 14147, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 3, - "has_chat_template": true, - "chat_type": "user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"2ed75db10d083517809183815083ca3654374ae5", - "model_dtype": "torch.float16", - "model_memory_footprint": 15074869248, - "model_num_parameters": 7268995072, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1466.7455065359477, - "min_seq_length": 1443, - "max_seq_length": 1533, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1690.7455065359477, - "min_seq_length": 1667, - "max_seq_length": 1757, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 1, - "non_truncated": 718, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 1, - "mean_seq_length": 1747.9262865090404, - "min_seq_length": 1371, - "max_seq_length": 2548, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998609179415855 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - "mean_seq_length": 1648.039188243527, - "min_seq_length": 1382, - "max_seq_length": 2646, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1706.9876923076922, - "min_seq_length": 1651, - "max_seq_length": 1827, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1487.3878571428572, - "min_seq_length": 1464, - "max_seq_length": 1738, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 3, + "non_truncated": 14147, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 3, + "has_chat_template": true, + "chat_type": "user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "2ed75db10d083517809183815083ca3654374ae5", + "model_dtype": "torch.float16", + "model_memory_footprint": 15074869248, + "model_num_parameters": 7268995072, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + 
"model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1393.764464692483, - "min_seq_length": 1127, - "max_seq_length": 1896, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1466.7455065359477, + "min_seq_length": 1443, + "max_seq_length": 1533, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1690.7455065359477, + "min_seq_length": 1667, + "max_seq_length": 1757, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 1, + "non_truncated": 718, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 1, + "mean_seq_length": 1747.9262865090404, + "min_seq_length": 1371, + "max_seq_length": 2548, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998609179415855 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1648.039188243527, + "min_seq_length": 1382, + "max_seq_length": 2646, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1706.9876923076922, + "min_seq_length": 1651, + "max_seq_length": 1827, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1487.3878571428572, + "min_seq_length": 1464, + "max_seq_length": 1738, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1393.764464692483, + "min_seq_length": 1127, + "max_seq_length": 1896, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1988.3360752056403, + "min_seq_length": 1953, + "max_seq_length": 2027, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + 
"sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1734.2492537313433, + "min_seq_length": 1713, + "max_seq_length": 1829, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1988.3360752056403, - "min_seq_length": 1953, - "max_seq_length": 2027, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Weni/WeniGPT-Mistral-7B-instructBase,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1734.2492537313433, - "min_seq_length": 1713, - "max_seq_length": 1829, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Weni/WeniGPT-Mistral-7B-instructBase,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": null + "git_hash": null } \ No newline at end of file diff --git a/Weni/WeniGPT-Mistral-7B-instructBase/results_2024-04-13T05-37-06.823274.json b/Weni/WeniGPT-Mistral-7B-instructBase/results_2024-04-13T05-37-06.823274.json index dc8dbac62388ae753d19e581f66ddcb709b8cae4..2bd50def83a24b892fda6f71c5adba6c991d2c12 100644 --- a/Weni/WeniGPT-Mistral-7B-instructBase/results_2024-04-13T05-37-06.823274.json +++ b/Weni/WeniGPT-Mistral-7B-instructBase/results_2024-04-13T05-37-06.823274.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.39549934623648175, - "all_grouped_npm": 0.07177075983532584, + "all_grouped_average": 0.4420973615330417, + "all_grouped_npm": 0.1631881545545385, "all_grouped": { "enem_challenge": 0.3261021693491952, "bluex": 0.23504867872044508, @@ -43,8 +43,8 @@ "assin2_rte": 0.6142066914966826, "assin2_sts": 0.40022445504959214, "faquad_nli": 0.344957407970794, - "hatebr_offensive": 0.44162029377851314, - "portuguese_hate_speech": 0.39714398155956604, + "hatebr_offensive": 0.6624304406677697, + "portuguese_hate_speech": 0.5957159723393489, "tweetsentbr": 0.5118077502764405 }, "all": { @@ -54,8 +54,8 @@ "harness|assin2_rte|assin2_rte|None|15": 0.6142066914966826, "harness|assin2_sts|assin2_sts|None|15": 0.40022445504959214, "harness|faquad_nli|faquad_nli|None|15": 0.344957407970794, - "harness|hatebr_offensive|hatebr_offensive|None|25": 0.44162029377851314, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.39714398155956604, + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.6624304406677697, + 
"harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.5957159723393489, "harness|tweetsentbr|tweetsentbr|None|25": 0.5118077502764405 }, "harness|enem_challenge|enem_challenge|None|3": { @@ -140,14 +140,14 @@ "main_score": 0.344957407970794 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.44162029377851314, + "f1_macro,all": 0.6624304406677697, "acc,all": 0.6785714285714286, - "main_score": 0.44162029377851314 + "main_score": 0.6624304406677697 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.39714398155956604, + "f1_macro,all": 0.5957159723393489, "acc,all": 0.6674500587544065, - "main_score": 0.39714398155956604 + "main_score": 0.5957159723393489 }, "harness|tweetsentbr|tweetsentbr|None|25": { "f1_macro,all": 0.5118077502764405, diff --git a/Weni/ZeroShot-3.3.34-Mistral-7b-Multilanguage-3.3.0-merged/raw_2024-04-14T08-35-42.065682/results.json b/Weni/ZeroShot-3.3.34-Mistral-7b-Multilanguage-3.3.0-merged/raw_2024-04-14T08-35-42.065682/results.json index a6da164a13147fd2562c970b6740d9f4f32f3552..6bb2f345c60611d0e328ac9e37e8cb9bd131c089 100644 --- a/Weni/ZeroShot-3.3.34-Mistral-7b-Multilanguage-3.3.0-merged/raw_2024-04-14T08-35-42.065682/results.json +++ b/Weni/ZeroShot-3.3.34-Mistral-7b-Multilanguage-3.3.0-merged/raw_2024-04-14T08-35-42.065682/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.8937890773359962, - "acc,all": 0.8937908496732027, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7389572598424192, - "mse,all": 0.6124836601307189, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.4756606397774687, - "acc,exam_id__USP_2018": 0.4074074074074074, - "acc,exam_id__USP_2019": 0.475, - "acc,exam_id__USP_2023": 0.5909090909090909, - "acc,exam_id__USP_2022": 0.3877551020408163, - "acc,exam_id__UNICAMP_2023": 0.46511627906976744, - "acc,exam_id__UNICAMP_2018": 0.35185185185185186, - "acc,exam_id__UNICAMP_2019": 0.42, - "acc,exam_id__UNICAMP_2024": 0.5111111111111111, - "acc,exam_id__UNICAMP_2021_1": 0.45652173913043476, - "acc,exam_id__UNICAMP_2021_2": 0.49019607843137253, - "acc,exam_id__USP_2020": 0.48214285714285715, - "acc,exam_id__UNICAMP_2022": 0.6410256410256411, - "acc,exam_id__UNICAMP_2020": 0.5454545454545454, - "acc,exam_id__USP_2024": 0.5609756097560976, - "acc,exam_id__USP_2021": 0.4230769230769231, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.5668299510146956, - "acc,exam_id__2011": 0.6153846153846154, - "acc,exam_id__2015": 0.5378151260504201, - "acc,exam_id__2017": 0.5344827586206896, - "acc,exam_id__2023": 0.6, - "acc,exam_id__2016_2": 0.6016260162601627, - "acc,exam_id__2022": 0.5413533834586466, - "acc,exam_id__2009": 0.5565217391304348, - "acc,exam_id__2010": 0.5641025641025641, - "acc,exam_id__2016": 0.5950413223140496, - "acc,exam_id__2014": 0.5779816513761468, - "acc,exam_id__2012": 0.5258620689655172, - "acc,exam_id__2013": 0.5462962962962963 - }, - "faquad_nli": { - "f1_macro,all": 0.6611065536414182, - "acc,all": 0.78, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8130154596146782, - "acc,all": 0.8171428571428572 - }, - "oab_exams": { - "acc,all": 0.3694760820045558, - "acc,exam_id__2013-11": 0.4125, - "acc,exam_id__2016-19": 0.47435897435897434, - "acc,exam_id__2016-20a": 0.325, - "acc,exam_id__2011-03": 0.3434343434343434, - "acc,exam_id__2016-20": 0.4125, - "acc,exam_id__2018-25": 0.3875, - "acc,exam_id__2015-17": 
0.44871794871794873, - "acc,exam_id__2017-24": 0.35, - "acc,exam_id__2012-06a": 0.3125, - "acc,exam_id__2013-10": 0.2875, - "acc,exam_id__2013-12": 0.3625, - "acc,exam_id__2012-06": 0.375, - "acc,exam_id__2014-15": 0.38461538461538464, - "acc,exam_id__2017-22": 0.4, - "acc,exam_id__2011-04": 0.375, - "acc,exam_id__2016-21": 0.325, - "acc,exam_id__2012-07": 0.3875, - "acc,exam_id__2012-08": 0.375, - "acc,exam_id__2015-18": 0.35, - "acc,exam_id__2015-16": 0.375, - "acc,exam_id__2012-09": 0.3246753246753247, - "acc,exam_id__2010-01": 0.3764705882352941, - "acc,exam_id__2011-05": 0.3125, - "acc,exam_id__2010-02": 0.36, - "acc,exam_id__2014-14": 0.3875, - "acc,exam_id__2017-23": 0.4, - "acc,exam_id__2014-13": 0.3625, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6903033807223998, - "acc,all": 0.7485311398354877 - }, - "tweetsentbr": { - "f1_macro,all": 0.4650145955538778, - "acc,all": 0.6825870646766169, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.8937890773359962, + "acc,all": 0.8937908496732027, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7389572598424192, + "mse,all": 0.6124836601307189, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.4756606397774687, + "acc,exam_id__USP_2018": 0.4074074074074074, + "acc,exam_id__USP_2019": 0.475, + "acc,exam_id__USP_2023": 0.5909090909090909, + "acc,exam_id__USP_2022": 0.3877551020408163, + "acc,exam_id__UNICAMP_2023": 0.46511627906976744, + "acc,exam_id__UNICAMP_2018": 0.35185185185185186, + "acc,exam_id__UNICAMP_2019": 0.42, + "acc,exam_id__UNICAMP_2024": 0.5111111111111111, + "acc,exam_id__UNICAMP_2021_1": 0.45652173913043476, + "acc,exam_id__UNICAMP_2021_2": 0.49019607843137253, + "acc,exam_id__USP_2020": 0.48214285714285715, + "acc,exam_id__UNICAMP_2022": 0.6410256410256411, + "acc,exam_id__UNICAMP_2020": 0.5454545454545454, + "acc,exam_id__USP_2024": 0.5609756097560976, + "acc,exam_id__USP_2021": 0.4230769230769231, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.5668299510146956, + "acc,exam_id__2011": 0.6153846153846154, + "acc,exam_id__2015": 0.5378151260504201, + "acc,exam_id__2017": 
0.5344827586206896, + "acc,exam_id__2023": 0.6, + "acc,exam_id__2016_2": 0.6016260162601627, + "acc,exam_id__2022": 0.5413533834586466, + "acc,exam_id__2009": 0.5565217391304348, + "acc,exam_id__2010": 0.5641025641025641, + "acc,exam_id__2016": 0.5950413223140496, + "acc,exam_id__2014": 0.5779816513761468, + "acc,exam_id__2012": 0.5258620689655172, + "acc,exam_id__2013": 0.5462962962962963 + }, + "faquad_nli": { + "f1_macro,all": 0.6611065536414182, + "acc,all": 0.78, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8130154596146782, + "acc,all": 0.8171428571428572 + }, + "oab_exams": { + "acc,all": 0.3694760820045558, + "acc,exam_id__2013-11": 0.4125, + "acc,exam_id__2016-19": 0.47435897435897434, + "acc,exam_id__2016-20a": 0.325, + "acc,exam_id__2011-03": 0.3434343434343434, + "acc,exam_id__2016-20": 0.4125, + "acc,exam_id__2018-25": 0.3875, + "acc,exam_id__2015-17": 0.44871794871794873, + "acc,exam_id__2017-24": 0.35, + "acc,exam_id__2012-06a": 0.3125, + "acc,exam_id__2013-10": 0.2875, + "acc,exam_id__2013-12": 0.3625, + "acc,exam_id__2012-06": 0.375, + "acc,exam_id__2014-15": 0.38461538461538464, + "acc,exam_id__2017-22": 0.4, + "acc,exam_id__2011-04": 0.375, + "acc,exam_id__2016-21": 0.325, + "acc,exam_id__2012-07": 0.3875, + "acc,exam_id__2012-08": 0.375, + "acc,exam_id__2015-18": 0.35, + "acc,exam_id__2015-16": 0.375, + "acc,exam_id__2012-09": 0.3246753246753247, + "acc,exam_id__2010-01": 0.3764705882352941, + "acc,exam_id__2011-05": 0.3125, + "acc,exam_id__2010-02": 0.36, + "acc,exam_id__2014-14": 0.3875, + "acc,exam_id__2017-23": 0.4, + "acc,exam_id__2014-13": 0.3625, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6903033807223998, + "acc,all": 0.7485311398354877 + }, + "tweetsentbr": { + "f1_macro,all": 0.6200194607385038, + "acc,all": 0.6825870646766169, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 3, - "non_truncated": 14147, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 3, - "has_chat_template": true, - "chat_type": "user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"c8b80064184975c0bb56cb7ae8d3c982ccf8ade3", - "model_dtype": "torch.float16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:2", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1451.7455065359477, - "min_seq_length": 1428, - "max_seq_length": 1518, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1675.7455065359477, - "min_seq_length": 1652, - "max_seq_length": 1742, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 1, - "non_truncated": 718, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 1, - "mean_seq_length": 1744.9262865090404, - "min_seq_length": 1368, - "max_seq_length": 2545, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998609179415855 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - "mean_seq_length": 1645.039188243527, - "min_seq_length": 1379, - "max_seq_length": 2643, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1691.9876923076922, - "min_seq_length": 1636, - "max_seq_length": 1812, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1462.3878571428572, - "min_seq_length": 1439, - "max_seq_length": 1713, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 3, + "non_truncated": 14147, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 3, + "has_chat_template": true, + "chat_type": "user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "c8b80064184975c0bb56cb7ae8d3c982ccf8ade3", + "model_dtype": "torch.float16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + 
"model_is_quantized": null, + "model_device": "cuda:2", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1390.764464692483, - "min_seq_length": 1124, - "max_seq_length": 1893, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1451.7455065359477, + "min_seq_length": 1428, + "max_seq_length": 1518, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1675.7455065359477, + "min_seq_length": 1652, + "max_seq_length": 1742, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 1, + "non_truncated": 718, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 1, + "mean_seq_length": 1744.9262865090404, + "min_seq_length": 1368, + "max_seq_length": 2545, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998609179415855 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1645.039188243527, + "min_seq_length": 1379, + "max_seq_length": 2643, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1691.9876923076922, + "min_seq_length": 1636, + "max_seq_length": 1812, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1462.3878571428572, + "min_seq_length": 1439, + "max_seq_length": 1713, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1390.764464692483, + "min_seq_length": 1124, + "max_seq_length": 1893, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1963.3360752056403, + "min_seq_length": 1928, + "max_seq_length": 2002, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + 
"sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1709.2492537313433, + "min_seq_length": 1688, + "max_seq_length": 1804, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1963.3360752056403, - "min_seq_length": 1928, - "max_seq_length": 2002, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Weni/ZeroShot-3.3.34-Mistral-7b-Multilanguage-3.3.0-merged,dtype=float16,device=cuda:2,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1709.2492537313433, - "min_seq_length": 1688, - "max_seq_length": 1804, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Weni/ZeroShot-3.3.34-Mistral-7b-Multilanguage-3.3.0-merged,dtype=float16,device=cuda:2,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "1158bba" + "git_hash": "1158bba" } \ No newline at end of file diff --git a/Weni/ZeroShot-3.3.34-Mistral-7b-Multilanguage-3.3.0-merged/results_2024-04-14T08-35-42.065682.json b/Weni/ZeroShot-3.3.34-Mistral-7b-Multilanguage-3.3.0-merged/results_2024-04-14T08-35-42.065682.json index 96efc07d5459c224adb295f04b6e52abb7b5fed5..57743b14cf77bf305751dc9acd82087988894e9a 100644 --- a/Weni/ZeroShot-3.3.34-Mistral-7b-Multilanguage-3.3.0-merged/results_2024-04-14T08-35-42.065682.json +++ b/Weni/ZeroShot-3.3.34-Mistral-7b-Multilanguage-3.3.0-merged/results_2024-04-14T08-35-42.065682.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6304614443897232, - "all_grouped_npm": 0.4533706532934451, + "all_grouped_average": 0.647684207188015, + "all_grouped_npm": 0.478999764600427, "all_grouped": { "enem_challenge": 0.5668299510146956, "bluex": 0.4756606397774687, @@ -45,7 +45,7 @@ "faquad_nli": 0.6611065536414182, "hatebr_offensive": 0.8130154596146782, "portuguese_hate_speech": 0.6903033807223998, - "tweetsentbr": 0.4650145955538778 + "tweetsentbr": 0.6200194607385038 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.5668299510146956, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.6611065536414182, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8130154596146782, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6903033807223998, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4650145955538778 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6200194607385038 }, 
"harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.5668299510146956, @@ -150,9 +150,9 @@ "main_score": 0.6903033807223998 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4650145955538778, + "f1_macro,all": 0.6200194607385038, "acc,all": 0.6825870646766169, - "main_score": 0.4650145955538778 + "main_score": 0.6200194607385038 } }, "config_tasks": { diff --git a/Weni/ZeroShot-3.4.22-Mistral-7b-DPO-1.0.0/raw_2024-04-14T01-18-29.528053/results.json b/Weni/ZeroShot-3.4.22-Mistral-7b-DPO-1.0.0/raw_2024-04-14T01-18-29.528053/results.json index 96b3b016bfb6a20cd363649da8a95f31cf3bfeb5..4f489b6272ae6d7a143a21f1d1a5ba9dd055728b 100644 --- a/Weni/ZeroShot-3.4.22-Mistral-7b-DPO-1.0.0/raw_2024-04-14T01-18-29.528053/results.json +++ b/Weni/ZeroShot-3.4.22-Mistral-7b-DPO-1.0.0/raw_2024-04-14T01-18-29.528053/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.894198481307534, - "acc,all": 0.8941993464052288, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7406309239618277, - "mse,all": 0.6155800653594771, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.48261474269819193, - "acc,exam_id__UNICAMP_2024": 0.5111111111111111, - "acc,exam_id__UNICAMP_2019": 0.42, - "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, - "acc,exam_id__USP_2021": 0.46153846153846156, - "acc,exam_id__UNICAMP_2022": 0.6410256410256411, - "acc,exam_id__UNICAMP_2020": 0.5636363636363636, - "acc,exam_id__UNICAMP_2023": 0.4883720930232558, - "acc,exam_id__USP_2019": 0.5, - "acc,exam_id__UNICAMP_2018": 0.3148148148148148, - "acc,exam_id__USP_2020": 0.4642857142857143, - "acc,exam_id__USP_2023": 0.6136363636363636, - "acc,exam_id__USP_2018": 0.4074074074074074, - "acc,exam_id__UNICAMP_2021_2": 0.47058823529411764, - "acc,exam_id__USP_2024": 0.5853658536585366, - "acc,exam_id__USP_2022": 0.40816326530612246, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.5717284814555633, - "acc,exam_id__2013": 0.5555555555555556, - "acc,exam_id__2016": 0.5867768595041323, - "acc,exam_id__2010": 0.5726495726495726, - "acc,exam_id__2017": 0.5431034482758621, - "acc,exam_id__2012": 0.5517241379310345, - "acc,exam_id__2014": 0.5596330275229358, - "acc,exam_id__2016_2": 0.6016260162601627, - "acc,exam_id__2009": 0.5652173913043478, - "acc,exam_id__2015": 0.5294117647058824, - "acc,exam_id__2011": 0.6324786324786325, - "acc,exam_id__2023": 0.6, - "acc,exam_id__2022": 0.556390977443609 - }, - "faquad_nli": { - "f1_macro,all": 0.6566680057730363, - "acc,all": 0.7815384615384615, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8052786021889948, - "acc,all": 0.81 - }, - "oab_exams": { - "acc,all": 0.36583143507972665, - "acc,exam_id__2015-18": 0.35, - "acc,exam_id__2012-09": 0.33766233766233766, - "acc,exam_id__2016-20a": 0.3125, - "acc,exam_id__2013-11": 0.375, - "acc,exam_id__2014-13": 0.3375, - "acc,exam_id__2016-20": 0.4125, - "acc,exam_id__2014-15": 0.41025641025641024, - "acc,exam_id__2014-14": 0.3875, - "acc,exam_id__2010-01": 0.36470588235294116, - "acc,exam_id__2015-16": 0.375, - "acc,exam_id__2017-22": 0.375, - "acc,exam_id__2018-25": 0.3875, - "acc,exam_id__2016-19": 0.4230769230769231, - "acc,exam_id__2017-24": 0.35, - "acc,exam_id__2012-06": 0.375, - "acc,exam_id__2012-06a": 0.3375, - "acc,exam_id__2013-10": 0.3125, - "acc,exam_id__2017-23": 0.375, - "acc,exam_id__2016-21": 0.3375, - "acc,exam_id__2011-04": 0.3625, - "acc,exam_id__2012-07": 0.3875, - 
"acc,exam_id__2013-12": 0.3625, - "acc,exam_id__2012-08": 0.3875, - "acc,exam_id__2015-17": 0.44871794871794873, - "acc,exam_id__2011-03": 0.32323232323232326, - "acc,exam_id__2010-02": 0.37, - "acc,exam_id__2011-05": 0.3125, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6932413017569821, - "acc,all": 0.754406580493537 - }, - "tweetsentbr": { - "f1_macro,all": 0.46933677553982683, - "acc,all": 0.6860696517412935, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.894198481307534, + "acc,all": 0.8941993464052288, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7406309239618277, + "mse,all": 0.6155800653594771, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.48261474269819193, + "acc,exam_id__UNICAMP_2024": 0.5111111111111111, + "acc,exam_id__UNICAMP_2019": 0.42, + "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, + "acc,exam_id__USP_2021": 0.46153846153846156, + "acc,exam_id__UNICAMP_2022": 0.6410256410256411, + "acc,exam_id__UNICAMP_2020": 0.5636363636363636, + "acc,exam_id__UNICAMP_2023": 0.4883720930232558, + "acc,exam_id__USP_2019": 0.5, + "acc,exam_id__UNICAMP_2018": 0.3148148148148148, + "acc,exam_id__USP_2020": 0.4642857142857143, + "acc,exam_id__USP_2023": 0.6136363636363636, + "acc,exam_id__USP_2018": 0.4074074074074074, + "acc,exam_id__UNICAMP_2021_2": 0.47058823529411764, + "acc,exam_id__USP_2024": 0.5853658536585366, + "acc,exam_id__USP_2022": 0.40816326530612246, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.5717284814555633, + "acc,exam_id__2013": 0.5555555555555556, + "acc,exam_id__2016": 0.5867768595041323, + "acc,exam_id__2010": 0.5726495726495726, + "acc,exam_id__2017": 0.5431034482758621, + "acc,exam_id__2012": 0.5517241379310345, + "acc,exam_id__2014": 0.5596330275229358, + "acc,exam_id__2016_2": 0.6016260162601627, + "acc,exam_id__2009": 0.5652173913043478, + "acc,exam_id__2015": 0.5294117647058824, + "acc,exam_id__2011": 0.6324786324786325, + "acc,exam_id__2023": 0.6, + "acc,exam_id__2022": 0.556390977443609 + }, + "faquad_nli": { + "f1_macro,all": 0.6566680057730363, + "acc,all": 0.7815384615384615, + "alias": 
"faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8052786021889948, + "acc,all": 0.81 + }, + "oab_exams": { + "acc,all": 0.36583143507972665, + "acc,exam_id__2015-18": 0.35, + "acc,exam_id__2012-09": 0.33766233766233766, + "acc,exam_id__2016-20a": 0.3125, + "acc,exam_id__2013-11": 0.375, + "acc,exam_id__2014-13": 0.3375, + "acc,exam_id__2016-20": 0.4125, + "acc,exam_id__2014-15": 0.41025641025641024, + "acc,exam_id__2014-14": 0.3875, + "acc,exam_id__2010-01": 0.36470588235294116, + "acc,exam_id__2015-16": 0.375, + "acc,exam_id__2017-22": 0.375, + "acc,exam_id__2018-25": 0.3875, + "acc,exam_id__2016-19": 0.4230769230769231, + "acc,exam_id__2017-24": 0.35, + "acc,exam_id__2012-06": 0.375, + "acc,exam_id__2012-06a": 0.3375, + "acc,exam_id__2013-10": 0.3125, + "acc,exam_id__2017-23": 0.375, + "acc,exam_id__2016-21": 0.3375, + "acc,exam_id__2011-04": 0.3625, + "acc,exam_id__2012-07": 0.3875, + "acc,exam_id__2013-12": 0.3625, + "acc,exam_id__2012-08": 0.3875, + "acc,exam_id__2015-17": 0.44871794871794873, + "acc,exam_id__2011-03": 0.32323232323232326, + "acc,exam_id__2010-02": 0.37, + "acc,exam_id__2011-05": 0.3125, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6932413017569821, + "acc,all": 0.754406580493537 + }, + "tweetsentbr": { + "f1_macro,all": 0.6257823673864359, + "acc,all": 0.6860696517412935, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 3, - "non_truncated": 14147, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 3, - "has_chat_template": true, - "chat_type": "user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"c8b80064184975c0bb56cb7ae8d3c982ccf8ade3", - "model_dtype": "torch.float16", - "model_memory_footprint": 15033974784, - "model_num_parameters": 7248547840, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1451.7455065359477, - "min_seq_length": 1428, - "max_seq_length": 1518, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1675.7455065359477, - "min_seq_length": 1652, - "max_seq_length": 1742, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 1, - "non_truncated": 718, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 1, - "mean_seq_length": 1744.9262865090404, - "min_seq_length": 1368, - "max_seq_length": 2545, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998609179415855 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - "mean_seq_length": 1645.039188243527, - "min_seq_length": 1379, - "max_seq_length": 2643, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1691.9876923076922, - "min_seq_length": 1636, - "max_seq_length": 1812, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1462.3878571428572, - "min_seq_length": 1439, - "max_seq_length": 1713, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 3, + "non_truncated": 14147, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 3, + "has_chat_template": true, + "chat_type": "user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "c8b80064184975c0bb56cb7ae8d3c982ccf8ade3", + "model_dtype": "torch.float16", + "model_memory_footprint": 15033974784, + "model_num_parameters": 7248547840, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + 
"model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1390.764464692483, - "min_seq_length": 1124, - "max_seq_length": 1893, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1451.7455065359477, + "min_seq_length": 1428, + "max_seq_length": 1518, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1675.7455065359477, + "min_seq_length": 1652, + "max_seq_length": 1742, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 1, + "non_truncated": 718, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 1, + "mean_seq_length": 1744.9262865090404, + "min_seq_length": 1368, + "max_seq_length": 2545, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998609179415855 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1645.039188243527, + "min_seq_length": 1379, + "max_seq_length": 2643, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1691.9876923076922, + "min_seq_length": 1636, + "max_seq_length": 1812, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1462.3878571428572, + "min_seq_length": 1439, + "max_seq_length": 1713, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1390.764464692483, + "min_seq_length": 1124, + "max_seq_length": 1893, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1963.3360752056403, + "min_seq_length": 1928, + "max_seq_length": 2002, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + 
"sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1709.2492537313433, + "min_seq_length": 1688, + "max_seq_length": 1804, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1963.3360752056403, - "min_seq_length": 1928, - "max_seq_length": 2002, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=Weni/ZeroShot-3.3.34-Mistral-7b-Multilanguage-3.3.0-merged,peft=Weni/ZeroShot-3.4.22-Mistral-7b-DPO-1.0.0,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1709.2492537313433, - "min_seq_length": 1688, - "max_seq_length": 1804, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=Weni/ZeroShot-3.3.34-Mistral-7b-Multilanguage-3.3.0-merged,peft=Weni/ZeroShot-3.4.22-Mistral-7b-DPO-1.0.0,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "1158bba" + "git_hash": "1158bba" } \ No newline at end of file diff --git a/Weni/ZeroShot-3.4.22-Mistral-7b-DPO-1.0.0/results_2024-04-14T01-18-29.528053.json b/Weni/ZeroShot-3.4.22-Mistral-7b-DPO-1.0.0/results_2024-04-14T01-18-29.528053.json index 4ac99c13f3e2a90cc8bb6d8977b6c31e80df8477..262ef83943bdb823e8cf6fc6cd06715cad0fb436 100644 --- a/Weni/ZeroShot-3.4.22-Mistral-7b-DPO-1.0.0/results_2024-04-14T01-18-29.528053.json +++ b/Weni/ZeroShot-3.4.22-Mistral-7b-DPO-1.0.0/results_2024-04-14T01-18-29.528053.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6310587499735205, - "all_grouped_npm": 0.45350033674566465, + "all_grouped_average": 0.6484415935120326, + "all_grouped_npm": 0.47936766343987913, "all_grouped": { "enem_challenge": 0.5717284814555633, "bluex": 0.48261474269819193, @@ -45,7 +45,7 @@ "faquad_nli": 0.6566680057730363, "hatebr_offensive": 0.8052786021889948, "portuguese_hate_speech": 0.6932413017569821, - "tweetsentbr": 0.46933677553982683 + "tweetsentbr": 0.6257823673864359 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.5717284814555633, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.6566680057730363, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8052786021889948, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6932413017569821, - "harness|tweetsentbr|tweetsentbr|None|25": 0.46933677553982683 + 
"harness|tweetsentbr|tweetsentbr|None|25": 0.6257823673864359 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.5717284814555633, @@ -150,9 +150,9 @@ "main_score": 0.6932413017569821 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.46933677553982683, + "f1_macro,all": 0.6257823673864359, "acc,all": 0.6860696517412935, - "main_score": 0.46933677553982683 + "main_score": 0.6257823673864359 } }, "config_tasks": { diff --git a/abhishek/autotrain-llama3-orpo-v2/raw_2024-04-27T05-52-38.138924/results.json b/abhishek/autotrain-llama3-orpo-v2/raw_2024-04-27T05-52-38.138924/results.json index e524513c2619805da84aa377f8ed44ffedfc5bff..61e9a9bccb7662c5bb7f761f82f6c6d3aa7797d3 100644 --- a/abhishek/autotrain-llama3-orpo-v2/raw_2024-04-27T05-52-38.138924/results.json +++ b/abhishek/autotrain-llama3-orpo-v2/raw_2024-04-27T05-52-38.138924/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.2137462957652232, - "acc,all": 0.45669934640522875, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.11473255710076143, - "mse,all": 2.8526266339869277, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.0, - "acc,exam_id__UNICAMP_2021_2": 0.0, - "acc,exam_id__UNICAMP_2022": 0.0, - "acc,exam_id__USP_2020": 0.0, - "acc,exam_id__USP_2024": 0.0, - "acc,exam_id__UNICAMP_2020": 0.0, - "acc,exam_id__USP_2019": 0.0, - "acc,exam_id__UNICAMP_2019": 0.0, - "acc,exam_id__USP_2018": 0.0, - "acc,exam_id__USP_2022": 0.0, - "acc,exam_id__UNICAMP_2021_1": 0.0, - "acc,exam_id__UNICAMP_2024": 0.0, - "acc,exam_id__USP_2023": 0.0, - "acc,exam_id__USP_2021": 0.0, - "acc,exam_id__UNICAMP_2023": 0.0, - "acc,exam_id__UNICAMP_2018": 0.0, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.0006997900629811056, - "acc,exam_id__2015": 0.0, - "acc,exam_id__2011": 0.0, - "acc,exam_id__2009": 0.0, - "acc,exam_id__2016_2": 0.0, - "acc,exam_id__2012": 0.0, - "acc,exam_id__2022": 0.0, - "acc,exam_id__2014": 0.009174311926605505, - "acc,exam_id__2023": 0.0, - "acc,exam_id__2010": 0.0, - "acc,exam_id__2013": 0.0, - "acc,exam_id__2016": 0.0, - "acc,exam_id__2017": 0.0 - }, - "faquad_nli": { - "f1_macro,all": 0.0, - "acc,all": 0.0, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.21996779388083734, - "acc,all": 0.4878571428571429 - }, - "oab_exams": { - "acc,all": 0.0, - "acc,exam_id__2013-10": 0.0, - "acc,exam_id__2017-23": 0.0, - "acc,exam_id__2012-09": 0.0, - "acc,exam_id__2017-24": 0.0, - "acc,exam_id__2017-22": 0.0, - "acc,exam_id__2012-06a": 0.0, - "acc,exam_id__2016-21": 0.0, - "acc,exam_id__2013-12": 0.0, - "acc,exam_id__2014-15": 0.0, - "acc,exam_id__2011-04": 0.0, - "acc,exam_id__2011-03": 0.0, - "acc,exam_id__2016-20": 0.0, - "acc,exam_id__2016-20a": 0.0, - "acc,exam_id__2015-17": 0.0, - "acc,exam_id__2015-18": 0.0, - "acc,exam_id__2015-16": 0.0, - "acc,exam_id__2018-25": 0.0, - "acc,exam_id__2010-02": 0.0, - "acc,exam_id__2013-11": 0.0, - "acc,exam_id__2014-14": 0.0, - "acc,exam_id__2012-08": 0.0, - "acc,exam_id__2012-06": 0.0, - "acc,exam_id__2016-19": 0.0, - "acc,exam_id__2014-13": 0.0, - "acc,exam_id__2012-07": 0.0, - "acc,exam_id__2010-01": 0.0, - "acc,exam_id__2011-05": 0.0, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.22986425339366515, - "acc,all": 0.2984723854289072 - }, - "tweetsentbr": { - "f1_macro,all": 0.17647896235368554, - "acc,all": 0.2527363184079602, - "alias": 
"tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.3206194436478348, + "acc,all": 0.45669934640522875, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.11473255710076143, + "mse,all": 2.8526266339869277, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.0, + "acc,exam_id__UNICAMP_2021_2": 0.0, + "acc,exam_id__UNICAMP_2022": 0.0, + "acc,exam_id__USP_2020": 0.0, + "acc,exam_id__USP_2024": 0.0, + "acc,exam_id__UNICAMP_2020": 0.0, + "acc,exam_id__USP_2019": 0.0, + "acc,exam_id__UNICAMP_2019": 0.0, + "acc,exam_id__USP_2018": 0.0, + "acc,exam_id__USP_2022": 0.0, + "acc,exam_id__UNICAMP_2021_1": 0.0, + "acc,exam_id__UNICAMP_2024": 0.0, + "acc,exam_id__USP_2023": 0.0, + "acc,exam_id__USP_2021": 0.0, + "acc,exam_id__UNICAMP_2023": 0.0, + "acc,exam_id__UNICAMP_2018": 0.0, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.0006997900629811056, + "acc,exam_id__2015": 0.0, + "acc,exam_id__2011": 0.0, + "acc,exam_id__2009": 0.0, + "acc,exam_id__2016_2": 0.0, + "acc,exam_id__2012": 0.0, + "acc,exam_id__2022": 0.0, + "acc,exam_id__2014": 0.009174311926605505, + "acc,exam_id__2023": 0.0, + "acc,exam_id__2010": 0.0, + "acc,exam_id__2013": 0.0, + "acc,exam_id__2016": 0.0, + "acc,exam_id__2017": 0.0 + }, + "faquad_nli": { + "f1_macro,all": 0.0, + "acc,all": 0.0, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.329951690821256, + "acc,all": 0.4878571428571429 + }, + "oab_exams": { + "acc,all": 0.0, + "acc,exam_id__2013-10": 0.0, + "acc,exam_id__2017-23": 0.0, + "acc,exam_id__2012-09": 0.0, + "acc,exam_id__2017-24": 0.0, + "acc,exam_id__2017-22": 0.0, + "acc,exam_id__2012-06a": 0.0, + "acc,exam_id__2016-21": 0.0, + "acc,exam_id__2013-12": 0.0, + "acc,exam_id__2014-15": 0.0, + "acc,exam_id__2011-04": 0.0, + "acc,exam_id__2011-03": 0.0, + "acc,exam_id__2016-20": 0.0, + "acc,exam_id__2016-20a": 0.0, + "acc,exam_id__2015-17": 0.0, + "acc,exam_id__2015-18": 0.0, + "acc,exam_id__2015-16": 0.0, + "acc,exam_id__2018-25": 0.0, + "acc,exam_id__2010-02": 0.0, + "acc,exam_id__2013-11": 0.0, + "acc,exam_id__2014-14": 0.0, + "acc,exam_id__2012-08": 0.0, + "acc,exam_id__2012-06": 0.0, + "acc,exam_id__2016-19": 
0.0, + "acc,exam_id__2014-13": 0.0, + "acc,exam_id__2012-07": 0.0, + "acc,exam_id__2010-01": 0.0, + "acc,exam_id__2011-05": 0.0, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.22986425339366515, + "acc,all": 0.2984723854289072 + }, + "tweetsentbr": { + "f1_macro,all": 0.2353052831382474, + "acc,all": 0.2527363184079602, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "1655d0683696a5de2eb9a59c339ee469297beb9c", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 16194879488, - "model_num_parameters": 8030326784, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 4, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1283.5322712418301, - "min_seq_length": 1264, - "max_seq_length": 1347, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1474.5322712418301, - "min_seq_length": 1455, - "max_seq_length": 1538, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1473.769123783032, - "min_seq_length": 1154, - "max_seq_length": 2123, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1401.3547935619315, - "min_seq_length": 1176, - "max_seq_length": 2329, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1412.8215384615385, - "min_seq_length": 1367, - "max_seq_length": 1509, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "1655d0683696a5de2eb9a59c339ee469297beb9c", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 16194879488, + "model_num_parameters": 8030326784, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 4, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1224.3878571428572, - "min_seq_length": 1204, - "max_seq_length": 1443, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1209.3772209567198, - "min_seq_length": 977, - "max_seq_length": 1643, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1283.5322712418301, + "min_seq_length": 1264, + "max_seq_length": 1347, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1474.5322712418301, + "min_seq_length": 1455, + "max_seq_length": 1538, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1473.769123783032, + "min_seq_length": 1154, + "max_seq_length": 2123, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1401.3547935619315, + "min_seq_length": 1176, + "max_seq_length": 2329, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1412.8215384615385, + "min_seq_length": 1367, + "max_seq_length": 1509, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1224.3878571428572, + "min_seq_length": 1204, + "max_seq_length": 1443, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1209.3772209567198, + "min_seq_length": 977, + "max_seq_length": 1643, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1621.4195064629848, + "min_seq_length": 1591, + "max_seq_length": 1653, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1482.1537313432837, + "min_seq_length": 1465, + "max_seq_length": 1530, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1621.4195064629848, - "min_seq_length": 1591, - "max_seq_length": 1653, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=abhishek/autotrain-llama3-orpo-v2,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1482.1537313432837, - "min_seq_length": 1465, - "max_seq_length": 1530, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=abhishek/autotrain-llama3-orpo-v2,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - 
"git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/abhishek/autotrain-llama3-orpo-v2/results_2024-04-27T05-52-38.138924.json b/abhishek/autotrain-llama3-orpo-v2/results_2024-04-27T05-52-38.138924.json index a9cda55d3c27f3de8ed70d4e08d46a3bfd91f89c..d435b78d22a931b4d06ec30543dbae3cde54a279 100644 --- a/abhishek/autotrain-llama3-orpo-v2/results_2024-04-27T05-52-38.138924.json +++ b/abhishek/autotrain-llama3-orpo-v2/results_2024-04-27T05-52-38.138924.json @@ -34,29 +34,29 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.10616551695079485, - "all_grouped_npm": -0.3813912313516803, + "all_grouped_average": 0.13679700201830508, + "all_grouped_npm": -0.3234742027427658, "all_grouped": { "enem_challenge": 0.0006997900629811056, "bluex": 0.0, "oab_exams": 0.0, - "assin2_rte": 0.2137462957652232, + "assin2_rte": 0.3206194436478348, "assin2_sts": 0.11473255710076143, "faquad_nli": 0.0, - "hatebr_offensive": 0.21996779388083734, + "hatebr_offensive": 0.329951690821256, "portuguese_hate_speech": 0.22986425339366515, - "tweetsentbr": 0.17647896235368554 + "tweetsentbr": 0.2353052831382474 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.0006997900629811056, "harness|bluex|bluex|None|3": 0.0, "harness|oab_exams|oab_exams|None|3": 0.0, - "harness|assin2_rte|assin2_rte|None|15": 0.2137462957652232, + "harness|assin2_rte|assin2_rte|None|15": 0.3206194436478348, "harness|assin2_sts|assin2_sts|None|15": 0.11473255710076143, "harness|faquad_nli|faquad_nli|None|15": 0.0, - "harness|hatebr_offensive|hatebr_offensive|None|25": 0.21996779388083734, + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.329951690821256, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.22986425339366515, - "harness|tweetsentbr|tweetsentbr|None|25": 0.17647896235368554 + "harness|tweetsentbr|tweetsentbr|None|25": 0.2353052831382474 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.0006997900629811056, @@ -125,9 +125,9 @@ "main_score": 0.0 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.2137462957652232, + "f1_macro,all": 0.3206194436478348, "acc,all": 0.45669934640522875, - "main_score": 0.2137462957652232 + "main_score": 0.3206194436478348 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.11473255710076143, @@ -140,9 +140,9 @@ "main_score": 0.0 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.21996779388083734, + "f1_macro,all": 0.329951690821256, "acc,all": 0.4878571428571429, - "main_score": 0.21996779388083734 + "main_score": 0.329951690821256 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { "f1_macro,all": 0.22986425339366515, @@ -150,9 +150,9 @@ "main_score": 0.22986425339366515 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.17647896235368554, + "f1_macro,all": 0.2353052831382474, "acc,all": 0.2527363184079602, - "main_score": 0.17647896235368554 + "main_score": 0.2353052831382474 } }, "config_tasks": { diff --git a/ai-forever/mGPT-13B/raw_2024-06-17T01-34-12.100171/results.json b/ai-forever/mGPT-13B/raw_2024-06-17T01-34-12.100171/results.json index 09bbc56ac3a9ca5ead2b5e2395b60772bcbbd6bd..81ffa3be641351bcc5c81caae5ee94836817e22c 100644 --- a/ai-forever/mGPT-13B/raw_2024-06-17T01-34-12.100171/results.json +++ b/ai-forever/mGPT-13B/raw_2024-06-17T01-34-12.100171/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.0, - "acc,all": 0.0, - "alias": "assin2_rte" - }, - "assin2_sts": { - 
"pearson,all": 0.005823464030093911, - "mse,all": 3.0598163807189542, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.1043115438108484, - "acc,exam_id__USP_2019": 0.125, - "acc,exam_id__USP_2020": 0.08928571428571429, - "acc,exam_id__UNICAMP_2020": 0.09090909090909091, - "acc,exam_id__UNICAMP_2021_1": 0.15217391304347827, - "acc,exam_id__USP_2018": 0.09259259259259259, - "acc,exam_id__USP_2024": 0.07317073170731707, - "acc,exam_id__UNICAMP_2021_2": 0.058823529411764705, - "acc,exam_id__USP_2021": 0.15384615384615385, - "acc,exam_id__UNICAMP_2023": 0.11627906976744186, - "acc,exam_id__UNICAMP_2024": 0.15555555555555556, - "acc,exam_id__UNICAMP_2019": 0.14, - "acc,exam_id__USP_2022": 0.061224489795918366, - "acc,exam_id__UNICAMP_2018": 0.09259259259259259, - "acc,exam_id__UNICAMP_2022": 0.07692307692307693, - "acc,exam_id__USP_2023": 0.09090909090909091, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.16655003498950316, - "acc,exam_id__2013": 0.1111111111111111, - "acc,exam_id__2016_2": 0.18699186991869918, - "acc,exam_id__2016": 0.1652892561983471, - "acc,exam_id__2022": 0.17293233082706766, - "acc,exam_id__2010": 0.15384615384615385, - "acc,exam_id__2009": 0.1391304347826087, - "acc,exam_id__2023": 0.2518518518518518, - "acc,exam_id__2011": 0.18803418803418803, - "acc,exam_id__2015": 0.12605042016806722, - "acc,exam_id__2017": 0.14655172413793102, - "acc,exam_id__2014": 0.1743119266055046, - "acc,exam_id__2012": 0.16379310344827586 - }, - "faquad_nli": { - "f1_macro,all": 0.0, - "acc,all": 0.0, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.10787708252260882, - "acc,all": 0.11071428571428571 - }, - "oab_exams": { - "acc,all": 0.0856492027334852, - "acc,exam_id__2012-07": 0.0375, - "acc,exam_id__2016-21": 0.125, - "acc,exam_id__2016-20a": 0.1, - "acc,exam_id__2014-13": 0.0625, - "acc,exam_id__2010-01": 0.047058823529411764, - "acc,exam_id__2018-25": 0.0625, - "acc,exam_id__2012-09": 0.07792207792207792, - "acc,exam_id__2015-18": 0.125, - "acc,exam_id__2017-24": 0.1375, - "acc,exam_id__2014-14": 0.0875, - "acc,exam_id__2017-23": 0.0875, - "acc,exam_id__2012-06": 0.075, - "acc,exam_id__2013-10": 0.0875, - "acc,exam_id__2013-11": 0.0875, - "acc,exam_id__2015-17": 0.07692307692307693, - "acc,exam_id__2012-08": 0.05, - "acc,exam_id__2016-19": 0.05128205128205128, - "acc,exam_id__2011-04": 0.0125, - "acc,exam_id__2011-05": 0.1, - "acc,exam_id__2012-06a": 0.125, - "acc,exam_id__2010-02": 0.07, - "acc,exam_id__2014-15": 0.08974358974358974, - "acc,exam_id__2013-12": 0.05, - "acc,exam_id__2016-20": 0.125, - "acc,exam_id__2015-16": 0.1375, - "acc,exam_id__2011-03": 0.1111111111111111, - "acc,exam_id__2017-22": 0.1125, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.28123987051929183, - "acc,all": 0.6956521739130435 - }, - "tweetsentbr": { - "f1_macro,all": 0.11361491459055105, - "acc,all": 0.2845771144278607, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.0, + "acc,all": 0.0, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.005823464030093911, + "mse,all": 3.0598163807189542, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.1043115438108484, + "acc,exam_id__USP_2019": 0.125, + "acc,exam_id__USP_2020": 0.08928571428571429, + "acc,exam_id__UNICAMP_2020": 0.09090909090909091, + "acc,exam_id__UNICAMP_2021_1": 0.15217391304347827, + "acc,exam_id__USP_2018": 0.09259259259259259, + "acc,exam_id__USP_2024": 0.07317073170731707, + "acc,exam_id__UNICAMP_2021_2": 0.058823529411764705, + "acc,exam_id__USP_2021": 0.15384615384615385, + "acc,exam_id__UNICAMP_2023": 0.11627906976744186, + "acc,exam_id__UNICAMP_2024": 0.15555555555555556, + "acc,exam_id__UNICAMP_2019": 0.14, + "acc,exam_id__USP_2022": 0.061224489795918366, + "acc,exam_id__UNICAMP_2018": 0.09259259259259259, + "acc,exam_id__UNICAMP_2022": 0.07692307692307693, + "acc,exam_id__USP_2023": 0.09090909090909091, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.16655003498950316, + "acc,exam_id__2013": 0.1111111111111111, + "acc,exam_id__2016_2": 0.18699186991869918, + "acc,exam_id__2016": 0.1652892561983471, + "acc,exam_id__2022": 0.17293233082706766, + "acc,exam_id__2010": 0.15384615384615385, + "acc,exam_id__2009": 0.1391304347826087, + "acc,exam_id__2023": 0.2518518518518518, + "acc,exam_id__2011": 0.18803418803418803, + "acc,exam_id__2015": 0.12605042016806722, + "acc,exam_id__2017": 0.14655172413793102, + "acc,exam_id__2014": 0.1743119266055046, + "acc,exam_id__2012": 0.16379310344827586 + }, + "faquad_nli": { + "f1_macro,all": 0.0, + "acc,all": 0.0, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.16181562378391323, + "acc,all": 0.11071428571428571 + }, + "oab_exams": { + "acc,all": 0.0856492027334852, + "acc,exam_id__2012-07": 0.0375, + "acc,exam_id__2016-21": 0.125, + "acc,exam_id__2016-20a": 0.1, + "acc,exam_id__2014-13": 0.0625, + "acc,exam_id__2010-01": 0.047058823529411764, + "acc,exam_id__2018-25": 0.0625, + "acc,exam_id__2012-09": 0.07792207792207792, + "acc,exam_id__2015-18": 0.125, + "acc,exam_id__2017-24": 0.1375, + "acc,exam_id__2014-14": 0.0875, + "acc,exam_id__2017-23": 0.0875, + "acc,exam_id__2012-06": 0.075, + "acc,exam_id__2013-10": 0.0875, + "acc,exam_id__2013-11": 0.0875, + "acc,exam_id__2015-17": 0.07692307692307693, + "acc,exam_id__2012-08": 0.05, + "acc,exam_id__2016-19": 0.05128205128205128, + "acc,exam_id__2011-04": 0.0125, + "acc,exam_id__2011-05": 0.1, + "acc,exam_id__2012-06a": 0.125, + "acc,exam_id__2010-02": 0.07, + "acc,exam_id__2014-15": 0.08974358974358974, 
+ "acc,exam_id__2013-12": 0.05, + "acc,exam_id__2016-20": 0.125, + "acc,exam_id__2015-16": 0.1375, + "acc,exam_id__2011-03": 0.1111111111111111, + "acc,exam_id__2017-22": 0.1125, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.4218598057789377, + "acc,all": 0.6956521739130435 + }, + "tweetsentbr": { + "f1_macro,all": 0.1514865527874014, + "acc,all": 0.2845771144278607, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 10, - "non_truncated": 14140, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 13, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "1927423a35e8e72ea43b564d7ad49f9ee81fc284", - "model_dtype": "torch.float16", - "model_memory_footprint": 26383913040, - "model_num_parameters": 13108070400, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2048, - "max_ctx_length": 2016, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1103.3361928104575, - "min_seq_length": 1084, - "max_seq_length": 1158, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1295.3361928104575, - "min_seq_length": 1276, - "max_seq_length": 1350, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 3, - "non_truncated": 716, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 3, - "mean_seq_length": 1422.0653685674547, - "min_seq_length": 1106, - "max_seq_length": 2073, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.995827538247566 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 7, - "non_truncated": 1422, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 10, - "mean_seq_length": 
1298.7543736878936, - "min_seq_length": 1083, - "max_seq_length": 2939, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.993002099370189 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1229.4092307692308, - "min_seq_length": 1188, - "max_seq_length": 1309, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 10, + "non_truncated": 14140, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 13, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "1927423a35e8e72ea43b564d7ad49f9ee81fc284", + "model_dtype": "torch.float16", + "model_memory_footprint": 26383913040, + "model_num_parameters": 13108070400, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2048, + "max_ctx_length": 2016, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1130.1992857142857, - "min_seq_length": 1108, - "max_seq_length": 1355, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1067.2441913439636, - "min_seq_length": 854, - "max_seq_length": 1428, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1103.3361928104575, + "min_seq_length": 1084, + "max_seq_length": 1158, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1295.3361928104575, + "min_seq_length": 1276, + "max_seq_length": 1350, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 3, + "non_truncated": 716, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 3, + "mean_seq_length": 1422.0653685674547, + "min_seq_length": 1106, + "max_seq_length": 2073, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.995827538247566 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 7, + "non_truncated": 1422, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 10, + "mean_seq_length": 1298.7543736878936, + "min_seq_length": 1083, + "max_seq_length": 2939, + "max_ctx_length": 2016, 
+ "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.993002099370189 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1229.4092307692308, + "min_seq_length": 1188, + "max_seq_length": 1309, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1130.1992857142857, + "min_seq_length": 1108, + "max_seq_length": 1355, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1067.2441913439636, + "min_seq_length": 854, + "max_seq_length": 1428, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1547.7579318448884, + "min_seq_length": 1515, + "max_seq_length": 1597, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1386.5024875621891, + "min_seq_length": 1366, + "max_seq_length": 1510, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1547.7579318448884, - "min_seq_length": 1515, - "max_seq_length": 1597, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=ai-forever/mGPT-13B,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1386.5024875621891, - "min_seq_length": 1366, - "max_seq_length": 1510, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=ai-forever/mGPT-13B,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": 
"2d67fba" + "git_hash": "2d67fba" } \ No newline at end of file diff --git a/ai-forever/mGPT-13B/results_2024-06-17T01-34-12.100171.json b/ai-forever/mGPT-13B/results_2024-06-17T01-34-12.100171.json index 46560cfa1d8253635fe543e668e9364cb69512c3..01732b3207cceb07c8cc8d80e17fae0c34742520 100644 --- a/ai-forever/mGPT-13B/results_2024-06-17T01-34-12.100171.json +++ b/ai-forever/mGPT-13B/results_2024-06-17T01-34-12.100171.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.09611845702182026, - "all_grouped_npm": -0.41465944069661226, + "all_grouped_average": 0.12194402532379812, + "all_grouped_npm": -0.36642193031542736, "all_grouped": { "enem_challenge": 0.16655003498950316, "bluex": 0.1043115438108484, @@ -43,9 +43,9 @@ "assin2_rte": 0.0, "assin2_sts": 0.005823464030093911, "faquad_nli": 0.0, - "hatebr_offensive": 0.10787708252260882, - "portuguese_hate_speech": 0.28123987051929183, - "tweetsentbr": 0.11361491459055105 + "hatebr_offensive": 0.16181562378391323, + "portuguese_hate_speech": 0.4218598057789377, + "tweetsentbr": 0.1514865527874014 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.16655003498950316, @@ -54,9 +54,9 @@ "harness|assin2_rte|assin2_rte|None|15": 0.0, "harness|assin2_sts|assin2_sts|None|15": 0.005823464030093911, "harness|faquad_nli|faquad_nli|None|15": 0.0, - "harness|hatebr_offensive|hatebr_offensive|None|25": 0.10787708252260882, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.28123987051929183, - "harness|tweetsentbr|tweetsentbr|None|25": 0.11361491459055105 + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.16181562378391323, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.4218598057789377, + "harness|tweetsentbr|tweetsentbr|None|25": 0.1514865527874014 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.16655003498950316, @@ -140,19 +140,19 @@ "main_score": 0.0 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.10787708252260882, + "f1_macro,all": 0.16181562378391323, "acc,all": 0.11071428571428571, - "main_score": 0.10787708252260882 + "main_score": 0.16181562378391323 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.28123987051929183, + "f1_macro,all": 0.4218598057789377, "acc,all": 0.6956521739130435, - "main_score": 0.28123987051929183 + "main_score": 0.4218598057789377 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.11361491459055105, + "f1_macro,all": 0.1514865527874014, "acc,all": 0.2845771144278607, - "main_score": 0.11361491459055105 + "main_score": 0.1514865527874014 } }, "config_tasks": { diff --git a/allenai/tulu-2-dpo-70b/raw_2024-05-22T02-18-14.364873/results.json b/allenai/tulu-2-dpo-70b/raw_2024-05-22T02-18-14.364873/results.json index d25b730debb700d1f4a7ee350293e5d9526101cc..1c74efb6dd2e6bef5ad4d387c90889dd18a2012c 100644 --- a/allenai/tulu-2-dpo-70b/raw_2024-05-22T02-18-14.364873/results.json +++ b/allenai/tulu-2-dpo-70b/raw_2024-05-22T02-18-14.364873/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9317329158205281, - "acc,all": 0.931781045751634, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7834390660894048, - "mse,all": 0.48135620915032673, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.6300417246175244, - "acc,exam_id__USP_2022": 0.6122448979591837, - "acc,exam_id__USP_2019": 0.575, - "acc,exam_id__USP_2024": 0.7317073170731707, - "acc,exam_id__USP_2023": 
0.8181818181818182, - "acc,exam_id__UNICAMP_2019": 0.7, - "acc,exam_id__UNICAMP_2021_2": 0.5882352941176471, - "acc,exam_id__UNICAMP_2021_1": 0.5869565217391305, - "acc,exam_id__UNICAMP_2023": 0.6511627906976745, - "acc,exam_id__USP_2020": 0.625, - "acc,exam_id__USP_2021": 0.6923076923076923, - "acc,exam_id__UNICAMP_2018": 0.5740740740740741, - "acc,exam_id__UNICAMP_2024": 0.6, - "acc,exam_id__USP_2018": 0.5185185185185185, - "acc,exam_id__UNICAMP_2022": 0.6666666666666666, - "acc,exam_id__UNICAMP_2020": 0.5636363636363636, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.7235829251224632, - "acc,exam_id__2016": 0.6694214876033058, - "acc,exam_id__2023": 0.762962962962963, - "acc,exam_id__2014": 0.6972477064220184, - "acc,exam_id__2009": 0.7304347826086957, - "acc,exam_id__2017": 0.7758620689655172, - "acc,exam_id__2016_2": 0.7073170731707317, - "acc,exam_id__2011": 0.7692307692307693, - "acc,exam_id__2015": 0.7226890756302521, - "acc,exam_id__2022": 0.631578947368421, - "acc,exam_id__2012": 0.7327586206896551, - "acc,exam_id__2013": 0.7592592592592593, - "acc,exam_id__2010": 0.7350427350427351 - }, - "faquad_nli": { - "f1_macro,all": 0.8339739223508806, - "acc,all": 0.8707692307692307, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8236992331909445, - "acc,all": 0.8278571428571428 - }, - "oab_exams": { - "acc,all": 0.5061503416856492, - "acc,exam_id__2011-03": 0.45454545454545453, - "acc,exam_id__2014-14": 0.525, - "acc,exam_id__2012-06a": 0.5, - "acc,exam_id__2017-24": 0.4625, - "acc,exam_id__2012-09": 0.5064935064935064, - "acc,exam_id__2010-01": 0.38823529411764707, - "acc,exam_id__2015-18": 0.5125, - "acc,exam_id__2015-17": 0.6282051282051282, - "acc,exam_id__2017-23": 0.5125, - "acc,exam_id__2010-02": 0.54, - "acc,exam_id__2012-06": 0.5125, - "acc,exam_id__2018-25": 0.4875, - "acc,exam_id__2016-21": 0.45, - "acc,exam_id__2011-04": 0.375, - "acc,exam_id__2013-11": 0.4625, - "acc,exam_id__2013-12": 0.575, - "acc,exam_id__2017-22": 0.6375, - "acc,exam_id__2012-07": 0.4625, - "acc,exam_id__2016-20a": 0.5, - "acc,exam_id__2015-16": 0.45, - "acc,exam_id__2016-19": 0.5512820512820513, - "acc,exam_id__2014-13": 0.45, - "acc,exam_id__2012-08": 0.4375, - "acc,exam_id__2013-10": 0.5875, - "acc,exam_id__2011-05": 0.575, - "acc,exam_id__2016-20": 0.525, - "acc,exam_id__2014-15": 0.6153846153846154, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7193440067158361, - "acc,all": 0.7414806110458284 - }, - "tweetsentbr": { - "f1_macro,all": 0.5397465968579614, - "acc,all": 0.7333333333333333, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9317329158205281, + "acc,all": 0.931781045751634, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7834390660894048, + "mse,all": 0.48135620915032673, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.6300417246175244, + "acc,exam_id__USP_2022": 0.6122448979591837, + "acc,exam_id__USP_2019": 0.575, + "acc,exam_id__USP_2024": 0.7317073170731707, + "acc,exam_id__USP_2023": 0.8181818181818182, + "acc,exam_id__UNICAMP_2019": 0.7, + "acc,exam_id__UNICAMP_2021_2": 0.5882352941176471, + "acc,exam_id__UNICAMP_2021_1": 0.5869565217391305, + "acc,exam_id__UNICAMP_2023": 0.6511627906976745, + "acc,exam_id__USP_2020": 0.625, + "acc,exam_id__USP_2021": 0.6923076923076923, + "acc,exam_id__UNICAMP_2018": 0.5740740740740741, + "acc,exam_id__UNICAMP_2024": 0.6, + "acc,exam_id__USP_2018": 0.5185185185185185, + "acc,exam_id__UNICAMP_2022": 0.6666666666666666, + "acc,exam_id__UNICAMP_2020": 0.5636363636363636, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.7235829251224632, + "acc,exam_id__2016": 0.6694214876033058, + "acc,exam_id__2023": 0.762962962962963, + "acc,exam_id__2014": 0.6972477064220184, + "acc,exam_id__2009": 0.7304347826086957, + "acc,exam_id__2017": 0.7758620689655172, + "acc,exam_id__2016_2": 0.7073170731707317, + "acc,exam_id__2011": 0.7692307692307693, + "acc,exam_id__2015": 0.7226890756302521, + "acc,exam_id__2022": 0.631578947368421, + "acc,exam_id__2012": 0.7327586206896551, + "acc,exam_id__2013": 0.7592592592592593, + "acc,exam_id__2010": 0.7350427350427351 + }, + "faquad_nli": { + "f1_macro,all": 0.8339739223508806, + "acc,all": 0.8707692307692307, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8236992331909445, + "acc,all": 0.8278571428571428 + }, + "oab_exams": { + "acc,all": 0.5061503416856492, + "acc,exam_id__2011-03": 0.45454545454545453, + "acc,exam_id__2014-14": 0.525, + "acc,exam_id__2012-06a": 0.5, + "acc,exam_id__2017-24": 0.4625, + "acc,exam_id__2012-09": 0.5064935064935064, + "acc,exam_id__2010-01": 0.38823529411764707, + "acc,exam_id__2015-18": 0.5125, + "acc,exam_id__2015-17": 0.6282051282051282, + "acc,exam_id__2017-23": 0.5125, + "acc,exam_id__2010-02": 0.54, + "acc,exam_id__2012-06": 0.5125, + "acc,exam_id__2018-25": 0.4875, + "acc,exam_id__2016-21": 0.45, + "acc,exam_id__2011-04": 0.375, + "acc,exam_id__2013-11": 0.4625, + "acc,exam_id__2013-12": 0.575, + "acc,exam_id__2017-22": 0.6375, + "acc,exam_id__2012-07": 0.4625, + "acc,exam_id__2016-20a": 0.5, + "acc,exam_id__2015-16": 0.45, + "acc,exam_id__2016-19": 0.5512820512820513, + "acc,exam_id__2014-13": 0.45, + 
"acc,exam_id__2012-08": 0.4375, + "acc,exam_id__2013-10": 0.5875, + "acc,exam_id__2011-05": 0.575, + "acc,exam_id__2016-20": 0.525, + "acc,exam_id__2014-15": 0.6153846153846154, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7193440067158361, + "acc,all": 0.7414806110458284 + }, + "tweetsentbr": { + "f1_macro,all": 0.7196621291439484, + "acc,all": 0.7333333333333333, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 2, - "accelerate_num_process": null, - "model_sha": "0ab5c875f0070d5aee8d36bc55f41de440a13f02", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 137953316864, - "model_num_parameters": 68976648192, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 1, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1490.9889705882354, - "min_seq_length": 1468, - "max_seq_length": 1557, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1690.9889705882354, - "min_seq_length": 1668, - "max_seq_length": 1757, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1659.7426981919332, - "min_seq_length": 1293, - "max_seq_length": 2419, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 
1538.9881035689293, - "min_seq_length": 1286, - "max_seq_length": 2578, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1684.1184615384616, - "min_seq_length": 1632, - "max_seq_length": 1791, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 2, + "accelerate_num_process": null, + "model_sha": "0ab5c875f0070d5aee8d36bc55f41de440a13f02", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 137953316864, + "model_num_parameters": 68976648192, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 1, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1598.9178571428572, - "min_seq_length": 1575, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1308.4145785876992, - "min_seq_length": 1053, - "max_seq_length": 1790, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1490.9889705882354, + "min_seq_length": 1468, + "max_seq_length": 1557, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1690.9889705882354, + "min_seq_length": 1668, + "max_seq_length": 1757, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1659.7426981919332, + "min_seq_length": 1293, + "max_seq_length": 2419, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1538.9881035689293, + "min_seq_length": 1286, + "max_seq_length": 2578, + "max_ctx_length": 
2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1684.1184615384616, + "min_seq_length": 1632, + "max_seq_length": 1791, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1598.9178571428572, + "min_seq_length": 1575, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1308.4145785876992, + "min_seq_length": 1053, + "max_seq_length": 1790, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2087.801410105758, + "min_seq_length": 2053, + "max_seq_length": 2131, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1862.6845771144278, + "min_seq_length": 1841, + "max_seq_length": 1980, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2087.801410105758, - "min_seq_length": 2053, - "max_seq_length": 2131, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=allenai/tulu-2-dpo-70b,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1862.6845771144278, - "min_seq_length": 1841, - "max_seq_length": 1980, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=allenai/tulu-2-dpo-70b,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - 
}, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/allenai/tulu-2-dpo-70b/results_2024-05-22T02-18-14.364873.json b/allenai/tulu-2-dpo-70b/results_2024-05-22T02-18-14.364873.json index f87475e45db7544584df401c5bf051249454eab4..082038836f38e873a40e225cb31c1fa7723e9b8e 100644 --- a/allenai/tulu-2-dpo-70b/results_2024-05-22T02-18-14.364873.json +++ b/allenai/tulu-2-dpo-70b/results_2024-05-22T02-18-14.364873.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.7213011924945768, - "all_grouped_npm": 0.587129696930097, + "all_grouped_average": 0.7412918071930199, + "all_grouped_npm": 0.6168776354694467, "all_grouped": { "enem_challenge": 0.7235829251224632, "bluex": 0.6300417246175244, @@ -45,7 +45,7 @@ "faquad_nli": 0.8339739223508806, "hatebr_offensive": 0.8236992331909445, "portuguese_hate_speech": 0.7193440067158361, - "tweetsentbr": 0.5397465968579614 + "tweetsentbr": 0.7196621291439484 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.7235829251224632, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.8339739223508806, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8236992331909445, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7193440067158361, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5397465968579614 + "harness|tweetsentbr|tweetsentbr|None|25": 0.7196621291439484 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.7235829251224632, @@ -150,9 +150,9 @@ "main_score": 0.7193440067158361 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5397465968579614, + "f1_macro,all": 0.7196621291439484, "acc,all": 0.7333333333333333, - "main_score": 0.5397465968579614 + "main_score": 0.7196621291439484 } }, "config_tasks": { diff --git a/allknowingroger/MultiverseEx26-7B-slerp/raw_2024-07-04T03-30-47.209441/results.json b/allknowingroger/MultiverseEx26-7B-slerp/raw_2024-07-04T03-30-47.209441/results.json index 0a42df4ab95127385387c4b767181176b8ee859b..4731b5c0eadf7214768c70dc94a18402ae8602a2 100644 --- a/allknowingroger/MultiverseEx26-7B-slerp/raw_2024-07-04T03-30-47.209441/results.json +++ b/allknowingroger/MultiverseEx26-7B-slerp/raw_2024-07-04T03-30-47.209441/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9223768647276014, - "acc,all": 0.9223856209150327, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7803062713375329, - "mse,all": 0.42421977124183013, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5438108484005564, - "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, - "acc,exam_id__UNICAMP_2021_2": 0.5882352941176471, - "acc,exam_id__UNICAMP_2022": 0.5897435897435898, - "acc,exam_id__USP_2022": 0.46938775510204084, - "acc,exam_id__USP_2020": 0.5178571428571429, - "acc,exam_id__USP_2019": 0.45, - "acc,exam_id__UNICAMP_2024": 0.4888888888888889, - "acc,exam_id__USP_2021": 0.4807692307692308, - "acc,exam_id__UNICAMP_2023": 0.6046511627906976, - "acc,exam_id__UNICAMP_2019": 0.54, - "acc,exam_id__USP_2024": 0.7560975609756098, - "acc,exam_id__USP_2018": 0.48148148148148145, - "acc,exam_id__UNICAMP_2018": 0.5185185185185185, - "acc,exam_id__USP_2023": 0.5909090909090909, - "acc,exam_id__UNICAMP_2020": 0.5818181818181818, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6389083275017495, - "acc,exam_id__2015": 0.6218487394957983, - "acc,exam_id__2010": 0.7008547008547008, - "acc,exam_id__2022": 0.6015037593984962, - "acc,exam_id__2009": 
0.6521739130434783, - "acc,exam_id__2011": 0.6666666666666666, - "acc,exam_id__2023": 0.6444444444444445, - "acc,exam_id__2016_2": 0.6097560975609756, - "acc,exam_id__2017": 0.6724137931034483, - "acc,exam_id__2014": 0.6146788990825688, - "acc,exam_id__2016": 0.5785123966942148, - "acc,exam_id__2012": 0.6206896551724138, - "acc,exam_id__2013": 0.6944444444444444 - }, - "faquad_nli": { - "f1_macro,all": 0.7842076261469852, - "acc,all": 0.8507692307692307, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8163838486760461, - "acc,all": 0.8207142857142857 - }, - "oab_exams": { - "acc,all": 0.4168564920273349, - "acc,exam_id__2016-20": 0.3625, - "acc,exam_id__2017-24": 0.375, - "acc,exam_id__2011-03": 0.3333333333333333, - "acc,exam_id__2010-02": 0.42, - "acc,exam_id__2016-20a": 0.35, - "acc,exam_id__2013-11": 0.45, - "acc,exam_id__2017-22": 0.5375, - "acc,exam_id__2017-23": 0.4375, - "acc,exam_id__2016-19": 0.5, - "acc,exam_id__2015-17": 0.5, - "acc,exam_id__2014-14": 0.525, - "acc,exam_id__2014-15": 0.47435897435897434, - "acc,exam_id__2013-10": 0.3875, - "acc,exam_id__2012-09": 0.33766233766233766, - "acc,exam_id__2011-05": 0.45, - "acc,exam_id__2011-04": 0.4125, - "acc,exam_id__2012-06a": 0.35, - "acc,exam_id__2013-12": 0.4625, - "acc,exam_id__2012-07": 0.35, - "acc,exam_id__2012-08": 0.4125, - "acc,exam_id__2012-06": 0.5, - "acc,exam_id__2016-21": 0.3875, - "acc,exam_id__2010-01": 0.3764705882352941, - "acc,exam_id__2015-16": 0.375, - "acc,exam_id__2014-13": 0.3375, - "acc,exam_id__2015-18": 0.4125, - "acc,exam_id__2018-25": 0.4625, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6983964585673782, - "acc,all": 0.7508813160987075 - }, - "tweetsentbr": { - "f1_macro,all": 0.49149393414413745, - "acc,all": 0.7044776119402985, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9223768647276014, + "acc,all": 0.9223856209150327, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7803062713375329, + "mse,all": 0.42421977124183013, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5438108484005564, + "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, + "acc,exam_id__UNICAMP_2021_2": 0.5882352941176471, + "acc,exam_id__UNICAMP_2022": 0.5897435897435898, + "acc,exam_id__USP_2022": 0.46938775510204084, + "acc,exam_id__USP_2020": 0.5178571428571429, + "acc,exam_id__USP_2019": 0.45, + "acc,exam_id__UNICAMP_2024": 0.4888888888888889, + "acc,exam_id__USP_2021": 0.4807692307692308, + "acc,exam_id__UNICAMP_2023": 0.6046511627906976, + "acc,exam_id__UNICAMP_2019": 0.54, + "acc,exam_id__USP_2024": 0.7560975609756098, + "acc,exam_id__USP_2018": 0.48148148148148145, + "acc,exam_id__UNICAMP_2018": 0.5185185185185185, + "acc,exam_id__USP_2023": 0.5909090909090909, + "acc,exam_id__UNICAMP_2020": 0.5818181818181818, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6389083275017495, + "acc,exam_id__2015": 0.6218487394957983, + "acc,exam_id__2010": 0.7008547008547008, + "acc,exam_id__2022": 0.6015037593984962, + "acc,exam_id__2009": 0.6521739130434783, + "acc,exam_id__2011": 0.6666666666666666, + "acc,exam_id__2023": 0.6444444444444445, + "acc,exam_id__2016_2": 0.6097560975609756, + "acc,exam_id__2017": 0.6724137931034483, + "acc,exam_id__2014": 0.6146788990825688, + "acc,exam_id__2016": 0.5785123966942148, + "acc,exam_id__2012": 0.6206896551724138, + "acc,exam_id__2013": 0.6944444444444444 + }, + "faquad_nli": { + "f1_macro,all": 0.7842076261469852, + "acc,all": 0.8507692307692307, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8163838486760461, + "acc,all": 0.8207142857142857 + }, + "oab_exams": { + "acc,all": 0.4168564920273349, + "acc,exam_id__2016-20": 0.3625, + "acc,exam_id__2017-24": 0.375, + "acc,exam_id__2011-03": 0.3333333333333333, + "acc,exam_id__2010-02": 0.42, + "acc,exam_id__2016-20a": 0.35, + "acc,exam_id__2013-11": 0.45, + "acc,exam_id__2017-22": 0.5375, + "acc,exam_id__2017-23": 0.4375, + "acc,exam_id__2016-19": 0.5, + "acc,exam_id__2015-17": 0.5, + "acc,exam_id__2014-14": 0.525, + "acc,exam_id__2014-15": 0.47435897435897434, + "acc,exam_id__2013-10": 0.3875, + "acc,exam_id__2012-09": 0.33766233766233766, + "acc,exam_id__2011-05": 0.45, + "acc,exam_id__2011-04": 0.4125, + "acc,exam_id__2012-06a": 0.35, + "acc,exam_id__2013-12": 0.4625, + "acc,exam_id__2012-07": 0.35, + "acc,exam_id__2012-08": 0.4125, + "acc,exam_id__2012-06": 0.5, + "acc,exam_id__2016-21": 0.3875, + 
"acc,exam_id__2010-01": 0.3764705882352941, + "acc,exam_id__2015-16": 0.375, + "acc,exam_id__2014-13": 0.3375, + "acc,exam_id__2015-18": 0.4125, + "acc,exam_id__2018-25": 0.4625, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6983964585673782, + "acc,all": 0.7508813160987075 + }, + "tweetsentbr": { + "f1_macro,all": 0.6553252455255166, + "acc,all": 0.7044776119402985, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "43f18d84e025693f00e9be335bf12fce96089b2f", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 1620.039188243527, 
- "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "43f18d84e025693f00e9be335bf12fce96089b2f", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=allknowingroger/MultiverseEx26-7B-slerp,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=allknowingroger/MultiverseEx26-7B-slerp,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - 
}, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/allknowingroger/MultiverseEx26-7B-slerp/results_2024-07-04T03-30-47.209441.json b/allknowingroger/MultiverseEx26-7B-slerp/results_2024-07-04T03-30-47.209441.json index 0acdec92084d75a9822f648ef4f133fcbd323a9e..05923aab6cef813824099fe127270b14e689c1ae 100644 --- a/allknowingroger/MultiverseEx26-7B-slerp/results_2024-07-04T03-30-47.209441.json +++ b/allknowingroger/MultiverseEx26-7B-slerp/results_2024-07-04T03-30-47.209441.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6769711857254802, - "all_grouped_npm": 0.5231145666996483, + "all_grouped_average": 0.6951746647678557, + "all_grouped_npm": 0.5502030771793737, "all_grouped": { "enem_challenge": 0.6389083275017495, "bluex": 0.5438108484005564, @@ -45,7 +45,7 @@ "faquad_nli": 0.7842076261469852, "hatebr_offensive": 0.8163838486760461, "portuguese_hate_speech": 0.6983964585673782, - "tweetsentbr": 0.49149393414413745 + "tweetsentbr": 0.6553252455255166 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6389083275017495, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7842076261469852, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8163838486760461, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6983964585673782, - "harness|tweetsentbr|tweetsentbr|None|25": 0.49149393414413745 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6553252455255166 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6389083275017495, @@ -150,9 +150,9 @@ "main_score": 0.6983964585673782 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.49149393414413745, + "f1_macro,all": 0.6553252455255166, "acc,all": 0.7044776119402985, - "main_score": 0.49149393414413745 + "main_score": 0.6553252455255166 } }, "config_tasks": { diff --git a/argilla/CapybaraHermes-2.5-Mistral-7B/raw_2024-03-08T11-45-00.125851/results.json b/argilla/CapybaraHermes-2.5-Mistral-7B/raw_2024-03-08T11-45-00.125851/results.json index 6f0b2402804527d0b68115867fb2356f071faa53..6c1a393d3cda12d7fac11062ad20da89339a0ee1 100644 --- a/argilla/CapybaraHermes-2.5-Mistral-7B/raw_2024-03-08T11-45-00.125851/results.json +++ b/argilla/CapybaraHermes-2.5-Mistral-7B/raw_2024-03-08T11-45-00.125851/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9010484174855619, - "acc,all": 0.9011437908496732, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7017862358256567, - "mse,all": 0.7094526143790849, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5243393602225312, - "acc,exam_id__USP_2024": 0.6829268292682927, - "acc,exam_id__USP_2022": 0.46938775510204084, - "acc,exam_id__USP_2023": 0.6363636363636364, - "acc,exam_id__USP_2020": 0.5, - "acc,exam_id__UNICAMP_2020": 0.5636363636363636, - "acc,exam_id__UNICAMP_2021_1": 0.43478260869565216, - "acc,exam_id__UNICAMP_2019": 0.52, - "acc,exam_id__UNICAMP_2023": 0.5348837209302325, - "acc,exam_id__USP_2019": 0.45, - "acc,exam_id__UNICAMP_2024": 0.6222222222222222, - "acc,exam_id__UNICAMP_2022": 0.6153846153846154, - "acc,exam_id__USP_2021": 0.5192307692307693, - "acc,exam_id__USP_2018": 0.42592592592592593, - "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, - "acc,exam_id__UNICAMP_2018": 0.4444444444444444, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6242127361791463, - "acc,exam_id__2012": 0.646551724137931, - "acc,exam_id__2009": 0.591304347826087, - 
"acc,exam_id__2014": 0.6238532110091743, - "acc,exam_id__2016_2": 0.5691056910569106, - "acc,exam_id__2010": 0.6410256410256411, - "acc,exam_id__2011": 0.7008547008547008, - "acc,exam_id__2013": 0.6574074074074074, - "acc,exam_id__2016": 0.6115702479338843, - "acc,exam_id__2023": 0.6296296296296297, - "acc,exam_id__2017": 0.646551724137931, - "acc,exam_id__2022": 0.631578947368421, - "acc,exam_id__2015": 0.5462184873949579 - }, - "faquad_nli": { - "f1_macro,all": 0.7489689797382105, - "acc,all": 0.8276923076923077, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7959179423530661, - "acc,all": 0.8 - }, - "oab_exams": { - "acc,all": 0.4432801822323462, - "acc,exam_id__2017-24": 0.425, - "acc,exam_id__2012-06": 0.4625, - "acc,exam_id__2012-06a": 0.5375, - "acc,exam_id__2015-17": 0.5641025641025641, - "acc,exam_id__2011-04": 0.3625, - "acc,exam_id__2017-23": 0.4375, - "acc,exam_id__2016-20a": 0.4625, - "acc,exam_id__2011-03": 0.32323232323232326, - "acc,exam_id__2016-19": 0.5769230769230769, - "acc,exam_id__2014-13": 0.425, - "acc,exam_id__2016-20": 0.4, - "acc,exam_id__2013-10": 0.4875, - "acc,exam_id__2015-16": 0.425, - "acc,exam_id__2013-11": 0.4375, - "acc,exam_id__2015-18": 0.4125, - "acc,exam_id__2013-12": 0.5125, - "acc,exam_id__2014-15": 0.47435897435897434, - "acc,exam_id__2012-08": 0.425, - "acc,exam_id__2017-22": 0.5375, - "acc,exam_id__2011-05": 0.45, - "acc,exam_id__2012-07": 0.3625, - "acc,exam_id__2012-09": 0.4025974025974026, - "acc,exam_id__2018-25": 0.4375, - "acc,exam_id__2010-01": 0.35294117647058826, - "acc,exam_id__2014-14": 0.525, - "acc,exam_id__2016-21": 0.4375, - "acc,exam_id__2010-02": 0.37, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6897412406985006, - "acc,all": 0.7250293772032902 - }, - "tweetsentbr": { - "f1_macro,all": 0.42828409799858047, - "acc,all": 0.6502487562189054, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9010484174855619, + "acc,all": 0.9011437908496732, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7017862358256567, + "mse,all": 0.7094526143790849, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5243393602225312, + "acc,exam_id__USP_2024": 0.6829268292682927, + "acc,exam_id__USP_2022": 0.46938775510204084, + "acc,exam_id__USP_2023": 0.6363636363636364, + "acc,exam_id__USP_2020": 0.5, + "acc,exam_id__UNICAMP_2020": 0.5636363636363636, + "acc,exam_id__UNICAMP_2021_1": 0.43478260869565216, + "acc,exam_id__UNICAMP_2019": 0.52, + "acc,exam_id__UNICAMP_2023": 0.5348837209302325, + "acc,exam_id__USP_2019": 0.45, + "acc,exam_id__UNICAMP_2024": 0.6222222222222222, + "acc,exam_id__UNICAMP_2022": 0.6153846153846154, + "acc,exam_id__USP_2021": 0.5192307692307693, + "acc,exam_id__USP_2018": 0.42592592592592593, + "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, + "acc,exam_id__UNICAMP_2018": 0.4444444444444444, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6242127361791463, + "acc,exam_id__2012": 0.646551724137931, + "acc,exam_id__2009": 0.591304347826087, + "acc,exam_id__2014": 0.6238532110091743, + "acc,exam_id__2016_2": 0.5691056910569106, + "acc,exam_id__2010": 0.6410256410256411, + "acc,exam_id__2011": 0.7008547008547008, + "acc,exam_id__2013": 0.6574074074074074, + "acc,exam_id__2016": 0.6115702479338843, + "acc,exam_id__2023": 0.6296296296296297, + "acc,exam_id__2017": 0.646551724137931, + "acc,exam_id__2022": 0.631578947368421, + "acc,exam_id__2015": 0.5462184873949579 + }, + "faquad_nli": { + "f1_macro,all": 0.7489689797382105, + "acc,all": 0.8276923076923077, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7959179423530661, + "acc,all": 0.8 + }, + "oab_exams": { + "acc,all": 0.4432801822323462, + "acc,exam_id__2017-24": 0.425, + "acc,exam_id__2012-06": 0.4625, + "acc,exam_id__2012-06a": 0.5375, + "acc,exam_id__2015-17": 0.5641025641025641, + "acc,exam_id__2011-04": 0.3625, + "acc,exam_id__2017-23": 0.4375, + "acc,exam_id__2016-20a": 0.4625, + "acc,exam_id__2011-03": 0.32323232323232326, + "acc,exam_id__2016-19": 0.5769230769230769, + "acc,exam_id__2014-13": 0.425, + "acc,exam_id__2016-20": 0.4, + "acc,exam_id__2013-10": 0.4875, + "acc,exam_id__2015-16": 0.425, + "acc,exam_id__2013-11": 0.4375, + "acc,exam_id__2015-18": 0.4125, + "acc,exam_id__2013-12": 0.5125, + "acc,exam_id__2014-15": 0.47435897435897434, + "acc,exam_id__2012-08": 0.425, + "acc,exam_id__2017-22": 0.5375, + "acc,exam_id__2011-05": 0.45, + "acc,exam_id__2012-07": 0.3625, + "acc,exam_id__2012-09": 
0.4025974025974026, + "acc,exam_id__2018-25": 0.4375, + "acc,exam_id__2010-01": 0.35294117647058826, + "acc,exam_id__2014-14": 0.525, + "acc,exam_id__2016-21": 0.4375, + "acc,exam_id__2010-02": 0.37, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6897412406985006, + "acc,all": 0.7250293772032902 + }, + "tweetsentbr": { + "f1_macro,all": 0.5710454639981073, + "acc,all": 0.6502487562189054, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"d06c86726aadd8dadb92c5b9b9e3ce8ef246c471", - "model_dtype": "torch.float16", - "model_memory_footprint": 15020376064, - "model_num_parameters": 7241748480, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1531.7455065359477, - "min_seq_length": 1508, - "max_seq_length": 1598, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1740.7455065359477, - "min_seq_length": 1717, - "max_seq_length": 1807, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1761.9262865090404, - "min_seq_length": 1385, - "max_seq_length": 2562, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - "mean_seq_length": 1662.039188243527, - "min_seq_length": 1396, - "max_seq_length": 2660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1756.9876923076922, - "min_seq_length": 1701, - "max_seq_length": 1877, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1567.3878571428572, - "min_seq_length": 1544, - "max_seq_length": 1818, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "d06c86726aadd8dadb92c5b9b9e3ce8ef246c471", + "model_dtype": "torch.float16", + "model_memory_footprint": 15020376064, + "model_num_parameters": 7241748480, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, 
+ "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1407.764464692483, - "min_seq_length": 1141, - "max_seq_length": 1910, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1531.7455065359477, + "min_seq_length": 1508, + "max_seq_length": 1598, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1740.7455065359477, + "min_seq_length": 1717, + "max_seq_length": 1807, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1761.9262865090404, + "min_seq_length": 1385, + "max_seq_length": 2562, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1662.039188243527, + "min_seq_length": 1396, + "max_seq_length": 2660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1756.9876923076922, + "min_seq_length": 1701, + "max_seq_length": 1877, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1567.3878571428572, + "min_seq_length": 1544, + "max_seq_length": 1818, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1407.764464692483, + "min_seq_length": 1141, + "max_seq_length": 1910, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2068.3360752056406, + "min_seq_length": 2033, + "max_seq_length": 2107, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + 
"sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1814.2492537313433, + "min_seq_length": 1793, + "max_seq_length": 1909, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2068.3360752056406, - "min_seq_length": 2033, - "max_seq_length": 2107, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=argilla/CapybaraHermes-2.5-Mistral-7B,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1814.2492537313433, - "min_seq_length": 1793, - "max_seq_length": 1909, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=argilla/CapybaraHermes-2.5-Mistral-7B,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": null + "git_hash": null } \ No newline at end of file diff --git a/argilla/CapybaraHermes-2.5-Mistral-7B/results_2024-03-08T11-45-00.125851.json b/argilla/CapybaraHermes-2.5-Mistral-7B/results_2024-03-08T11-45-00.125851.json index ee9ed510aa0580f1e07d4aaea49557d37dc6f1ec..cbcf9e5f800b95508e1efc8b8ffba62c6d884ff2 100644 --- a/argilla/CapybaraHermes-2.5-Mistral-7B/results_2024-03-08T11-45-00.125851.json +++ b/argilla/CapybaraHermes-2.5-Mistral-7B/results_2024-03-08T11-45-00.125851.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6508421325259556, - "all_grouped_npm": 0.48468978584541744, + "all_grouped_average": 0.666704506525903, + "all_grouped_npm": 0.508294509059625, "all_grouped": { "enem_challenge": 0.6242127361791463, "bluex": 0.5243393602225312, @@ -45,7 +45,7 @@ "faquad_nli": 0.7489689797382105, "hatebr_offensive": 0.7959179423530661, "portuguese_hate_speech": 0.6897412406985006, - "tweetsentbr": 0.42828409799858047 + "tweetsentbr": 0.5710454639981073 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6242127361791463, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7489689797382105, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7959179423530661, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6897412406985006, - "harness|tweetsentbr|tweetsentbr|None|25": 0.42828409799858047 + "harness|tweetsentbr|tweetsentbr|None|25": 0.5710454639981073 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6242127361791463, @@ -150,9 +150,9 @@ "main_score": 0.6897412406985006 
}, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.42828409799858047, + "f1_macro,all": 0.5710454639981073, "acc,all": 0.6502487562189054, - "main_score": 0.42828409799858047 + "main_score": 0.5710454639981073 } }, "config_tasks": { diff --git a/argilla/notux-8x7b-v1/raw_2024-03-07T15-39-25.432269/results.json b/argilla/notux-8x7b-v1/raw_2024-03-07T15-39-25.432269/results.json index 3123580878fc49d21ef3ab8842c85efb3594ad74..44862fc6b6f7903ae0727e38107415d8eb59b7da 100644 --- a/argilla/notux-8x7b-v1/raw_2024-03-07T15-39-25.432269/results.json +++ b/argilla/notux-8x7b-v1/raw_2024-03-07T15-39-25.432269/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.61773132029976, - "acc,all": 0.926062091503268, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.8239880630155871, - "mse,all": 0.40034313725490195, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.6022253129346314, - "acc,exam_id__UNICAMP_2018": 0.48148148148148145, - "acc,exam_id__USP_2022": 0.5918367346938775, - "acc,exam_id__USP_2018": 0.5555555555555556, - "acc,exam_id__UNICAMP_2023": 0.7906976744186046, - "acc,exam_id__USP_2023": 0.7045454545454546, - "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, - "acc,exam_id__UNICAMP_2024": 0.6, - "acc,exam_id__USP_2024": 0.7560975609756098, - "acc,exam_id__UNICAMP_2020": 0.6727272727272727, - "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, - "acc,exam_id__UNICAMP_2019": 0.66, - "acc,exam_id__USP_2021": 0.6153846153846154, - "acc,exam_id__UNICAMP_2022": 0.6666666666666666, - "acc,exam_id__USP_2020": 0.5, - "acc,exam_id__USP_2019": 0.5, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.7095871238628412, - "acc,exam_id__2016_2": 0.6666666666666666, - "acc,exam_id__2015": 0.7226890756302521, - "acc,exam_id__2013": 0.7314814814814815, - "acc,exam_id__2010": 0.7521367521367521, - "acc,exam_id__2023": 0.7037037037037037, - "acc,exam_id__2014": 0.7339449541284404, - "acc,exam_id__2022": 0.6616541353383458, - "acc,exam_id__2017": 0.6724137931034483, - "acc,exam_id__2016": 0.6694214876033058, - "acc,exam_id__2012": 0.6724137931034483, - "acc,exam_id__2009": 0.7391304347826086, - "acc,exam_id__2011": 0.8034188034188035 - }, - "faquad_nli": { - "f1_macro,all": 0.7984830206980806, - "acc,all": 0.8446153846153847, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7790834886956947, - "acc,all": 0.7878571428571428 - }, - "oab_exams": { - "acc,all": 0.49521640091116176, - "acc,exam_id__2016-20a": 0.4375, - "acc,exam_id__2011-03": 0.4444444444444444, - "acc,exam_id__2018-25": 0.45, - "acc,exam_id__2014-14": 0.5625, - "acc,exam_id__2017-24": 0.3625, - "acc,exam_id__2015-18": 0.575, - "acc,exam_id__2011-04": 0.45, - "acc,exam_id__2015-17": 0.6794871794871795, - "acc,exam_id__2013-10": 0.4875, - "acc,exam_id__2010-02": 0.51, - "acc,exam_id__2016-20": 0.45, - "acc,exam_id__2012-09": 0.44155844155844154, - "acc,exam_id__2012-06": 0.575, - "acc,exam_id__2017-23": 0.475, - "acc,exam_id__2012-06a": 0.5125, - "acc,exam_id__2012-08": 0.4375, - "acc,exam_id__2014-15": 0.6025641025641025, - "acc,exam_id__2012-07": 0.4875, - "acc,exam_id__2014-13": 0.5125, - "acc,exam_id__2013-11": 0.4375, - "acc,exam_id__2010-01": 0.36470588235294116, - "acc,exam_id__2016-21": 0.475, - "acc,exam_id__2013-12": 0.5125, - "acc,exam_id__2017-22": 0.625, - "acc,exam_id__2011-05": 0.475, - "acc,exam_id__2016-19": 0.5256410256410257, - "acc,exam_id__2015-16": 0.525, - "alias": 
"oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7330226721453221, - "acc,all": 0.7591069330199764 - }, - "tweetsentbr": { - "f1_macro,all": 0.533069727855328, - "acc,all": 0.7502487562189055, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9265969804496399, + "acc,all": 0.926062091503268, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.8239880630155871, + "mse,all": 0.40034313725490195, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.6022253129346314, + "acc,exam_id__UNICAMP_2018": 0.48148148148148145, + "acc,exam_id__USP_2022": 0.5918367346938775, + "acc,exam_id__USP_2018": 0.5555555555555556, + "acc,exam_id__UNICAMP_2023": 0.7906976744186046, + "acc,exam_id__USP_2023": 0.7045454545454546, + "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, + "acc,exam_id__UNICAMP_2024": 0.6, + "acc,exam_id__USP_2024": 0.7560975609756098, + "acc,exam_id__UNICAMP_2020": 0.6727272727272727, + "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, + "acc,exam_id__UNICAMP_2019": 0.66, + "acc,exam_id__USP_2021": 0.6153846153846154, + "acc,exam_id__UNICAMP_2022": 0.6666666666666666, + "acc,exam_id__USP_2020": 0.5, + "acc,exam_id__USP_2019": 0.5, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.7095871238628412, + "acc,exam_id__2016_2": 0.6666666666666666, + "acc,exam_id__2015": 0.7226890756302521, + "acc,exam_id__2013": 0.7314814814814815, + "acc,exam_id__2010": 0.7521367521367521, + "acc,exam_id__2023": 0.7037037037037037, + "acc,exam_id__2014": 0.7339449541284404, + "acc,exam_id__2022": 0.6616541353383458, + "acc,exam_id__2017": 0.6724137931034483, + "acc,exam_id__2016": 0.6694214876033058, + "acc,exam_id__2012": 0.6724137931034483, + "acc,exam_id__2009": 0.7391304347826086, + "acc,exam_id__2011": 0.8034188034188035 + }, + "faquad_nli": { + "f1_macro,all": 0.7984830206980806, + "acc,all": 0.8446153846153847, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7790834886956947, + "acc,all": 0.7878571428571428 + }, + "oab_exams": { + "acc,all": 0.49521640091116176, + "acc,exam_id__2016-20a": 0.4375, + 
"acc,exam_id__2011-03": 0.4444444444444444, + "acc,exam_id__2018-25": 0.45, + "acc,exam_id__2014-14": 0.5625, + "acc,exam_id__2017-24": 0.3625, + "acc,exam_id__2015-18": 0.575, + "acc,exam_id__2011-04": 0.45, + "acc,exam_id__2015-17": 0.6794871794871795, + "acc,exam_id__2013-10": 0.4875, + "acc,exam_id__2010-02": 0.51, + "acc,exam_id__2016-20": 0.45, + "acc,exam_id__2012-09": 0.44155844155844154, + "acc,exam_id__2012-06": 0.575, + "acc,exam_id__2017-23": 0.475, + "acc,exam_id__2012-06a": 0.5125, + "acc,exam_id__2012-08": 0.4375, + "acc,exam_id__2014-15": 0.6025641025641025, + "acc,exam_id__2012-07": 0.4875, + "acc,exam_id__2014-13": 0.5125, + "acc,exam_id__2013-11": 0.4375, + "acc,exam_id__2010-01": 0.36470588235294116, + "acc,exam_id__2016-21": 0.475, + "acc,exam_id__2013-12": 0.5125, + "acc,exam_id__2017-22": 0.625, + "acc,exam_id__2011-05": 0.475, + "acc,exam_id__2016-19": 0.5256410256410257, + "acc,exam_id__2015-16": 0.525, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7330226721453221, + "acc,all": 0.7591069330199764 + }, + "tweetsentbr": { + "f1_macro,all": 0.7107596371404373, + "acc,all": 0.7502487562189055, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "user_assistant", - "n_gpus": 2, - "accelerate_num_process": null, - "model_sha": 
"0b29f9afcbae2ab4c5085638d8f5a7f6d44c6b17", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 93942464512, - "model_num_parameters": 46702792704, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 4096, - "max_ctx_length": 4064, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1451.7455065359477, - "min_seq_length": 1428, - "max_seq_length": 1518, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1675.7455065359477, - "min_seq_length": 1652, - "max_seq_length": 1742, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1744.9262865090404, - "min_seq_length": 1368, - "max_seq_length": 2545, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1645.039188243527, - "min_seq_length": 1379, - "max_seq_length": 2643, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1691.9876923076922, - "min_seq_length": 1636, - "max_seq_length": 1812, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1462.3878571428572, - "min_seq_length": 1439, - "max_seq_length": 1713, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "user_assistant", + "n_gpus": 2, + "accelerate_num_process": null, + "model_sha": "0b29f9afcbae2ab4c5085638d8f5a7f6d44c6b17", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 93942464512, + "model_num_parameters": 46702792704, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + 
"model_device": "cuda:0", + "batch_size": 16, + "max_length": 4096, + "max_ctx_length": 4064, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1390.764464692483, - "min_seq_length": 1124, - "max_seq_length": 1893, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1451.7455065359477, + "min_seq_length": 1428, + "max_seq_length": 1518, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1675.7455065359477, + "min_seq_length": 1652, + "max_seq_length": 1742, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1744.9262865090404, + "min_seq_length": 1368, + "max_seq_length": 2545, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1645.039188243527, + "min_seq_length": 1379, + "max_seq_length": 2643, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1691.9876923076922, + "min_seq_length": 1636, + "max_seq_length": 1812, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1462.3878571428572, + "min_seq_length": 1439, + "max_seq_length": 1713, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1390.764464692483, + "min_seq_length": 1124, + "max_seq_length": 1893, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1963.3360752056403, + "min_seq_length": 1928, + "max_seq_length": 2002, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, 
+ "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1709.2492537313433, + "min_seq_length": 1688, + "max_seq_length": 1804, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1963.3360752056403, - "min_seq_length": 1928, - "max_seq_length": 2002, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=argilla/notux-8x7b-v1,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1709.2492537313433, - "min_seq_length": 1688, - "max_seq_length": 1804, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=argilla/notux-8x7b-v1,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": null + "git_hash": null } \ No newline at end of file diff --git a/argilla/notux-8x7b-v1/results_2024-03-07T15-39-25.432269.json b/argilla/notux-8x7b-v1/results_2024-03-07T15-39-25.432269.json index d41eda201d06ae4658b79632b60f800892883357..e1bc969bf392d9b79545721ef64ee5f442f75e82 100644 --- a/argilla/notux-8x7b-v1/results_2024-03-07T15-39-25.432269.json +++ b/argilla/notux-8x7b-v1/results_2024-03-07T15-39-25.432269.json @@ -34,29 +34,29 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6769341256020452, - "all_grouped_npm": 0.4989549218909805, + "all_grouped_average": 0.7309958555392664, + "all_grouped_npm": 0.5969716805759255, "all_grouped": { "enem_challenge": 0.7095871238628412, "bluex": 0.6022253129346314, "oab_exams": 0.49521640091116176, - "assin2_rte": 0.61773132029976, + "assin2_rte": 0.9265969804496399, "assin2_sts": 0.8239880630155871, "faquad_nli": 0.7984830206980806, "hatebr_offensive": 0.7790834886956947, "portuguese_hate_speech": 0.7330226721453221, - "tweetsentbr": 0.533069727855328 + "tweetsentbr": 0.7107596371404373 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.7095871238628412, "harness|bluex|bluex|None|3": 0.6022253129346314, "harness|oab_exams|oab_exams|None|3": 0.49521640091116176, - "harness|assin2_rte|assin2_rte|None|15": 0.61773132029976, + "harness|assin2_rte|assin2_rte|None|15": 0.9265969804496399, "harness|assin2_sts|assin2_sts|None|15": 0.8239880630155871, "harness|faquad_nli|faquad_nli|None|15": 0.7984830206980806, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7790834886956947, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7330226721453221, - 
"harness|tweetsentbr|tweetsentbr|None|25": 0.533069727855328 + "harness|tweetsentbr|tweetsentbr|None|25": 0.7107596371404373 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.7095871238628412, @@ -125,9 +125,9 @@ "main_score": 0.49521640091116176 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.61773132029976, + "f1_macro,all": 0.9265969804496399, "acc,all": 0.926062091503268, - "main_score": 0.61773132029976 + "main_score": 0.9265969804496399 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.8239880630155871, @@ -150,9 +150,9 @@ "main_score": 0.7330226721453221 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.533069727855328, + "f1_macro,all": 0.7107596371404373, "acc,all": 0.7502487562189055, - "main_score": 0.533069727855328 + "main_score": 0.7107596371404373 } }, "config_tasks": { diff --git a/automerger/YamshadowExperiment28-7B/raw_2024-06-18T09-27-09.546862/results.json b/automerger/YamshadowExperiment28-7B/raw_2024-06-18T09-27-09.546862/results.json index ee825dc99f48ba804c0a272713cfc5be872dd3d4..7a70cd350f5e5aab77deae03ff483835eb79956c 100644 --- a/automerger/YamshadowExperiment28-7B/raw_2024-06-18T09-27-09.546862/results.json +++ b/automerger/YamshadowExperiment28-7B/raw_2024-06-18T09-27-09.546862/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9223814244089994, - "acc,all": 0.9223856209150327, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.778134909825148, - "mse,all": 0.42924836601307187, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.541029207232267, - "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, - "acc,exam_id__UNICAMP_2023": 0.6046511627906976, - "acc,exam_id__UNICAMP_2020": 0.5818181818181818, - "acc,exam_id__UNICAMP_2021_1": 0.5869565217391305, - "acc,exam_id__USP_2021": 0.46153846153846156, - "acc,exam_id__USP_2024": 0.7317073170731707, - "acc,exam_id__UNICAMP_2018": 0.5, - "acc,exam_id__UNICAMP_2019": 0.54, - "acc,exam_id__UNICAMP_2022": 0.5897435897435898, - "acc,exam_id__USP_2023": 0.5909090909090909, - "acc,exam_id__USP_2019": 0.45, - "acc,exam_id__UNICAMP_2024": 0.5333333333333333, - "acc,exam_id__USP_2020": 0.5178571428571429, - "acc,exam_id__USP_2018": 0.5, - "acc,exam_id__USP_2022": 0.46938775510204084, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6375087473757872, - "acc,exam_id__2014": 0.6146788990825688, - "acc,exam_id__2023": 0.6444444444444445, - "acc,exam_id__2012": 0.6293103448275862, - "acc,exam_id__2013": 0.6851851851851852, - "acc,exam_id__2022": 0.6090225563909775, - "acc,exam_id__2015": 0.6134453781512605, - "acc,exam_id__2010": 0.6923076923076923, - "acc,exam_id__2016": 0.5785123966942148, - "acc,exam_id__2009": 0.6434782608695652, - "acc,exam_id__2016_2": 0.6016260162601627, - "acc,exam_id__2011": 0.6752136752136753, - "acc,exam_id__2017": 0.6724137931034483 - }, - "faquad_nli": { - "f1_macro,all": 0.7859986232079255, - "acc,all": 0.8476923076923077, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8186191231446596, - "acc,all": 0.8228571428571428 - }, - "oab_exams": { - "acc,all": 0.41776765375854213, - "acc,exam_id__2010-01": 0.36470588235294116, - "acc,exam_id__2012-06": 0.4875, - "acc,exam_id__2014-15": 0.46153846153846156, - "acc,exam_id__2017-22": 0.55, - "acc,exam_id__2016-21": 0.3875, - "acc,exam_id__2017-24": 0.3875, - "acc,exam_id__2014-14": 0.525, - "acc,exam_id__2017-23": 0.4125, - "acc,exam_id__2011-03": 
0.3434343434343434, - "acc,exam_id__2015-17": 0.5, - "acc,exam_id__2011-05": 0.45, - "acc,exam_id__2015-16": 0.375, - "acc,exam_id__2016-19": 0.5128205128205128, - "acc,exam_id__2012-09": 0.35064935064935066, - "acc,exam_id__2012-07": 0.3625, - "acc,exam_id__2016-20a": 0.3625, - "acc,exam_id__2010-02": 0.42, - "acc,exam_id__2013-11": 0.45, - "acc,exam_id__2014-13": 0.3375, - "acc,exam_id__2012-08": 0.4125, - "acc,exam_id__2018-25": 0.45, - "acc,exam_id__2013-10": 0.4125, - "acc,exam_id__2012-06a": 0.3625, - "acc,exam_id__2013-12": 0.4625, - "acc,exam_id__2016-20": 0.3625, - "acc,exam_id__2015-18": 0.4125, - "acc,exam_id__2011-04": 0.3875, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7040205898720089, - "acc,all": 0.7532314923619271 - }, - "tweetsentbr": { - "f1_macro,all": 0.4954833856454548, - "acc,all": 0.7069651741293532, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9223814244089994, + "acc,all": 0.9223856209150327, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.778134909825148, + "mse,all": 0.42924836601307187, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.541029207232267, + "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, + "acc,exam_id__UNICAMP_2023": 0.6046511627906976, + "acc,exam_id__UNICAMP_2020": 0.5818181818181818, + "acc,exam_id__UNICAMP_2021_1": 0.5869565217391305, + "acc,exam_id__USP_2021": 0.46153846153846156, + "acc,exam_id__USP_2024": 0.7317073170731707, + "acc,exam_id__UNICAMP_2018": 0.5, + "acc,exam_id__UNICAMP_2019": 0.54, + "acc,exam_id__UNICAMP_2022": 0.5897435897435898, + "acc,exam_id__USP_2023": 0.5909090909090909, + "acc,exam_id__USP_2019": 0.45, + "acc,exam_id__UNICAMP_2024": 0.5333333333333333, + "acc,exam_id__USP_2020": 0.5178571428571429, + "acc,exam_id__USP_2018": 0.5, + "acc,exam_id__USP_2022": 0.46938775510204084, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6375087473757872, + "acc,exam_id__2014": 0.6146788990825688, + "acc,exam_id__2023": 0.6444444444444445, + "acc,exam_id__2012": 0.6293103448275862, + "acc,exam_id__2013": 0.6851851851851852, + "acc,exam_id__2022": 0.6090225563909775, + 
"acc,exam_id__2015": 0.6134453781512605, + "acc,exam_id__2010": 0.6923076923076923, + "acc,exam_id__2016": 0.5785123966942148, + "acc,exam_id__2009": 0.6434782608695652, + "acc,exam_id__2016_2": 0.6016260162601627, + "acc,exam_id__2011": 0.6752136752136753, + "acc,exam_id__2017": 0.6724137931034483 + }, + "faquad_nli": { + "f1_macro,all": 0.7859986232079255, + "acc,all": 0.8476923076923077, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8186191231446596, + "acc,all": 0.8228571428571428 + }, + "oab_exams": { + "acc,all": 0.41776765375854213, + "acc,exam_id__2010-01": 0.36470588235294116, + "acc,exam_id__2012-06": 0.4875, + "acc,exam_id__2014-15": 0.46153846153846156, + "acc,exam_id__2017-22": 0.55, + "acc,exam_id__2016-21": 0.3875, + "acc,exam_id__2017-24": 0.3875, + "acc,exam_id__2014-14": 0.525, + "acc,exam_id__2017-23": 0.4125, + "acc,exam_id__2011-03": 0.3434343434343434, + "acc,exam_id__2015-17": 0.5, + "acc,exam_id__2011-05": 0.45, + "acc,exam_id__2015-16": 0.375, + "acc,exam_id__2016-19": 0.5128205128205128, + "acc,exam_id__2012-09": 0.35064935064935066, + "acc,exam_id__2012-07": 0.3625, + "acc,exam_id__2016-20a": 0.3625, + "acc,exam_id__2010-02": 0.42, + "acc,exam_id__2013-11": 0.45, + "acc,exam_id__2014-13": 0.3375, + "acc,exam_id__2012-08": 0.4125, + "acc,exam_id__2018-25": 0.45, + "acc,exam_id__2013-10": 0.4125, + "acc,exam_id__2012-06a": 0.3625, + "acc,exam_id__2013-12": 0.4625, + "acc,exam_id__2016-20": 0.3625, + "acc,exam_id__2015-18": 0.4125, + "acc,exam_id__2011-04": 0.3875, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7040205898720089, + "acc,all": 0.7532314923619271 + }, + "tweetsentbr": { + "f1_macro,all": 0.6606445141939398, + "acc,all": 0.7069651741293532, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "76972ed8aacba1fd14f78e6f8d347f087f8b6800", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 1620.039188243527, 
- "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "76972ed8aacba1fd14f78e6f8d347f087f8b6800", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=automerger/YamshadowExperiment28-7B,dtype=bfloat16,device=cuda:0,revision=76972ed8aacba1fd14f78e6f8d347f087f8b6800,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=automerger/YamshadowExperiment28-7B,dtype=bfloat16,device=cuda:0,revision=76972ed8aacba1fd14f78e6f8d347f087f8b6800,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - 
null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "2d67fba" + "git_hash": "2d67fba" } \ No newline at end of file diff --git a/automerger/YamshadowExperiment28-7B/results_2024-06-18T09-27-09.546862.json b/automerger/YamshadowExperiment28-7B/results_2024-06-18T09-27-09.546862.json index 0d7d9f7901d0fe501424bb91e9af9d6d9ed71dce..56ca6aeb7396a2e4ca7b17d9b32e3c696605f33e 100644 --- a/automerger/YamshadowExperiment28-7B/results_2024-06-18T09-27-09.546862.json +++ b/automerger/YamshadowExperiment28-7B/results_2024-06-18T09-27-09.546862.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6778826293856436, - "all_grouped_npm": 0.5251377150399352, + "all_grouped_average": 0.6962338658910308, + "all_grouped_npm": 0.5524461026967614, "all_grouped": { "enem_challenge": 0.6375087473757872, "bluex": 0.541029207232267, @@ -45,7 +45,7 @@ "faquad_nli": 0.7859986232079255, "hatebr_offensive": 0.8186191231446596, "portuguese_hate_speech": 0.7040205898720089, - "tweetsentbr": 0.4954833856454548 + "tweetsentbr": 0.6606445141939398 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6375087473757872, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7859986232079255, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8186191231446596, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7040205898720089, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4954833856454548 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6606445141939398 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6375087473757872, @@ -150,9 +150,9 @@ "main_score": 0.7040205898720089 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4954833856454548, + "f1_macro,all": 0.6606445141939398, "acc,all": 0.7069651741293532, - "main_score": 0.4954833856454548 + "main_score": 0.6606445141939398 } }, "config_tasks": { diff --git a/axolotl-ai-co/romulus-mistral-nemo-12b-simpo/raw_2024-09-01T06-34-38.572809/results.json b/axolotl-ai-co/romulus-mistral-nemo-12b-simpo/raw_2024-09-01T06-34-38.572809/results.json index cecfe5f35132af5968b2d221113e61c5de3f4298..d78949e4861290cfda365936d56fe49b8e9f1808 100644 --- a/axolotl-ai-co/romulus-mistral-nemo-12b-simpo/raw_2024-09-01T06-34-38.572809/results.json +++ b/axolotl-ai-co/romulus-mistral-nemo-12b-simpo/raw_2024-09-01T06-34-38.572809/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9051636192879993, - "acc,all": 0.9056372549019608, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7869752179868569, - "mse,all": 0.4992754673202614, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.60778859527121, - "acc,exam_id__USP_2018": 0.5370370370370371, - "acc,exam_id__UNICAMP_2024": 0.5111111111111111, - "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, - "acc,exam_id__UNICAMP_2020": 0.6363636363636364, - "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, - "acc,exam_id__USP_2021": 0.5961538461538461, - "acc,exam_id__USP_2023": 0.7045454545454546, - "acc,exam_id__USP_2020": 0.625, - "acc,exam_id__USP_2024": 0.7804878048780488, - "acc,exam_id__USP_2019": 0.625, - "acc,exam_id__UNICAMP_2018": 0.5555555555555556, - "acc,exam_id__UNICAMP_2022": 0.7435897435897436, - "acc,exam_id__UNICAMP_2023": 0.6046511627906976, - "acc,exam_id__UNICAMP_2019": 0.62, - "acc,exam_id__USP_2022": 0.6326530612244898, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.7095871238628412, - "acc,exam_id__2012": 
0.7155172413793104, - "acc,exam_id__2015": 0.7394957983193278, - "acc,exam_id__2011": 0.7606837606837606, - "acc,exam_id__2022": 0.6165413533834586, - "acc,exam_id__2009": 0.6695652173913044, - "acc,exam_id__2016_2": 0.7154471544715447, - "acc,exam_id__2013": 0.6851851851851852, - "acc,exam_id__2014": 0.6972477064220184, - "acc,exam_id__2016": 0.7107438016528925, - "acc,exam_id__2023": 0.7703703703703704, - "acc,exam_id__2017": 0.7155172413793104, - "acc,exam_id__2010": 0.717948717948718 - }, - "faquad_nli": { - "f1_macro,all": 0.6804733727810651, - "acc,all": 0.7092307692307692, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.844157646190224, - "acc,all": 0.8464285714285714 - }, - "oab_exams": { - "acc,all": 0.5362186788154898, - "acc,exam_id__2012-06a": 0.625, - "acc,exam_id__2018-25": 0.475, - "acc,exam_id__2011-05": 0.6, - "acc,exam_id__2011-04": 0.475, - "acc,exam_id__2012-07": 0.5625, - "acc,exam_id__2010-01": 0.4470588235294118, - "acc,exam_id__2014-13": 0.45, - "acc,exam_id__2015-16": 0.4375, - "acc,exam_id__2017-23": 0.4625, - "acc,exam_id__2016-21": 0.4875, - "acc,exam_id__2012-06": 0.5125, - "acc,exam_id__2017-24": 0.525, - "acc,exam_id__2012-08": 0.6125, - "acc,exam_id__2015-17": 0.6410256410256411, - "acc,exam_id__2016-20a": 0.475, - "acc,exam_id__2013-12": 0.4625, - "acc,exam_id__2013-11": 0.5375, - "acc,exam_id__2015-18": 0.6, - "acc,exam_id__2014-14": 0.625, - "acc,exam_id__2016-20": 0.575, - "acc,exam_id__2012-09": 0.5064935064935064, - "acc,exam_id__2010-02": 0.57, - "acc,exam_id__2014-15": 0.6282051282051282, - "acc,exam_id__2011-03": 0.45454545454545453, - "acc,exam_id__2013-10": 0.5, - "acc,exam_id__2017-22": 0.6125, - "acc,exam_id__2016-19": 0.6410256410256411, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.47766309569344845, - "acc,all": 0.7391304347826086 - }, - "tweetsentbr": { - "f1_macro,all": 0.6905432027259325, - "acc,all": 0.7313432835820896, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9051636192879993, + "acc,all": 0.9056372549019608, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7869752179868569, + "mse,all": 0.4992754673202614, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.60778859527121, + "acc,exam_id__USP_2018": 0.5370370370370371, + "acc,exam_id__UNICAMP_2024": 0.5111111111111111, + "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, + "acc,exam_id__UNICAMP_2020": 0.6363636363636364, + "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, + "acc,exam_id__USP_2021": 0.5961538461538461, + "acc,exam_id__USP_2023": 0.7045454545454546, + "acc,exam_id__USP_2020": 0.625, + "acc,exam_id__USP_2024": 0.7804878048780488, + "acc,exam_id__USP_2019": 0.625, + "acc,exam_id__UNICAMP_2018": 0.5555555555555556, + "acc,exam_id__UNICAMP_2022": 0.7435897435897436, + "acc,exam_id__UNICAMP_2023": 0.6046511627906976, + "acc,exam_id__UNICAMP_2019": 0.62, + "acc,exam_id__USP_2022": 0.6326530612244898, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.7095871238628412, + "acc,exam_id__2012": 0.7155172413793104, + "acc,exam_id__2015": 0.7394957983193278, + "acc,exam_id__2011": 0.7606837606837606, + "acc,exam_id__2022": 0.6165413533834586, + "acc,exam_id__2009": 0.6695652173913044, + "acc,exam_id__2016_2": 0.7154471544715447, + "acc,exam_id__2013": 0.6851851851851852, + "acc,exam_id__2014": 0.6972477064220184, + "acc,exam_id__2016": 0.7107438016528925, + "acc,exam_id__2023": 0.7703703703703704, + "acc,exam_id__2017": 0.7155172413793104, + "acc,exam_id__2010": 0.717948717948718 + }, + "faquad_nli": { + "f1_macro,all": 0.6804733727810651, + "acc,all": 0.7092307692307692, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.844157646190224, + "acc,all": 0.8464285714285714 + }, + "oab_exams": { + "acc,all": 0.5362186788154898, + "acc,exam_id__2012-06a": 0.625, + "acc,exam_id__2018-25": 0.475, + "acc,exam_id__2011-05": 0.6, + "acc,exam_id__2011-04": 0.475, + "acc,exam_id__2012-07": 0.5625, + "acc,exam_id__2010-01": 0.4470588235294118, + "acc,exam_id__2014-13": 0.45, + "acc,exam_id__2015-16": 0.4375, + "acc,exam_id__2017-23": 0.4625, + "acc,exam_id__2016-21": 0.4875, + "acc,exam_id__2012-06": 0.5125, + "acc,exam_id__2017-24": 0.525, + "acc,exam_id__2012-08": 0.6125, + "acc,exam_id__2015-17": 0.6410256410256411, + "acc,exam_id__2016-20a": 0.475, + "acc,exam_id__2013-12": 0.4625, + "acc,exam_id__2013-11": 0.5375, + "acc,exam_id__2015-18": 0.6, + "acc,exam_id__2014-14": 0.625, + "acc,exam_id__2016-20": 0.575, + "acc,exam_id__2012-09": 0.5064935064935064, + "acc,exam_id__2010-02": 0.57, + 
"acc,exam_id__2014-15": 0.6282051282051282, + "acc,exam_id__2011-03": 0.45454545454545453, + "acc,exam_id__2013-10": 0.5, + "acc,exam_id__2017-22": 0.6125, + "acc,exam_id__2016-19": 0.6410256410256411, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7164946435401727, + "acc,all": 0.7391304347826086 + }, + "tweetsentbr": { + "f1_macro,all": 0.6905432027259325, + "acc,all": 0.7313432835820896, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "15fd3ffa46c1ea51aa5d26a1da24214e324d7cf2", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 24495575040, - "model_num_parameters": 12247782400, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1232.376633986928, - "min_seq_length": 1214, - "max_seq_length": 1290, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1426.376633986928, - "min_seq_length": 1408, - "max_seq_length": 1484, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1442.518776077886, - "min_seq_length": 1139, - "max_seq_length": 2092, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1314.4590622813157, - "min_seq_length": 1105, - "max_seq_length": 2379, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1320.0492307692307, - "min_seq_length": 1279, - "max_seq_length": 1400, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "15fd3ffa46c1ea51aa5d26a1da24214e324d7cf2", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 24495575040, + "model_num_parameters": 12247782400, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1291.5, - "min_seq_length": 1271, - "max_seq_length": 1499, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1065.0373576309794, - "min_seq_length": 853, - "max_seq_length": 1411, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1232.376633986928, + "min_seq_length": 1214, + "max_seq_length": 1290, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1426.376633986928, + "min_seq_length": 1408, + "max_seq_length": 1484, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1442.518776077886, + "min_seq_length": 1139, + "max_seq_length": 2092, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1314.4590622813157, + "min_seq_length": 1105, + "max_seq_length": 2379, + "max_ctx_length": 2528, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1320.0492307692307, + "min_seq_length": 1279, + "max_seq_length": 1400, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1291.5, + "min_seq_length": 1271, + "max_seq_length": 1499, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1065.0373576309794, + "min_seq_length": 853, + "max_seq_length": 1411, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1661.2761457109284, + "min_seq_length": 1632, + "max_seq_length": 1703, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1514.776616915423, + "min_seq_length": 1497, + "max_seq_length": 1587, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1661.2761457109284, - "min_seq_length": 1632, - "max_seq_length": 1703, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=axolotl-ai-co/romulus-mistral-nemo-12b-simpo,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1514.776616915423, - "min_seq_length": 1497, - "max_seq_length": 1587, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=axolotl-ai-co/romulus-mistral-nemo-12b-simpo,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": 
"5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/axolotl-ai-co/romulus-mistral-nemo-12b-simpo/results_2024-09-01T06-34-38.572809.json b/axolotl-ai-co/romulus-mistral-nemo-12b-simpo/results_2024-09-01T06-34-38.572809.json index 339d407fe8e685b004361e0cefb535cead81c871..d80dc1bb04eb82817c98f966eaf580b9082e5e6e 100644 --- a/axolotl-ai-co/romulus-mistral-nemo-12b-simpo/results_2024-09-01T06-34-38.572809.json +++ b/axolotl-ai-co/romulus-mistral-nemo-12b-simpo/results_2024-09-01T06-34-38.572809.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6931745058461186, - "all_grouped_npm": 0.5275238868699735, + "all_grouped_average": 0.7197113444957547, + "all_grouped_npm": 0.578458318059294, "all_grouped": { "enem_challenge": 0.7095871238628412, "bluex": 0.60778859527121, @@ -44,7 +44,7 @@ "assin2_sts": 0.7869752179868569, "faquad_nli": 0.6804733727810651, "hatebr_offensive": 0.844157646190224, - "portuguese_hate_speech": 0.47766309569344845, + "portuguese_hate_speech": 0.7164946435401727, "tweetsentbr": 0.6905432027259325 }, "all": { @@ -55,7 +55,7 @@ "harness|assin2_sts|assin2_sts|None|15": 0.7869752179868569, "harness|faquad_nli|faquad_nli|None|15": 0.6804733727810651, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.844157646190224, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.47766309569344845, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7164946435401727, "harness|tweetsentbr|tweetsentbr|None|25": 0.6905432027259325 }, "harness|enem_challenge|enem_challenge|None|3": { @@ -145,9 +145,9 @@ "main_score": 0.844157646190224 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.47766309569344845, + "f1_macro,all": 0.7164946435401727, "acc,all": 0.7391304347826086, - "main_score": 0.47766309569344845 + "main_score": 0.7164946435401727 }, "harness|tweetsentbr|tweetsentbr|None|25": { "f1_macro,all": 0.6905432027259325, diff --git a/baichuan-inc/Baichuan2-13B-Chat/raw_2024-05-27T05-34-57.596146/results.json b/baichuan-inc/Baichuan2-13B-Chat/raw_2024-05-27T05-34-57.596146/results.json index 351f844c8b39e43bf42ef359292334b8c3cfa48e..9cb276b0c731e08c7e2470eb97c2a795a101b2c8 100644 --- a/baichuan-inc/Baichuan2-13B-Chat/raw_2024-05-27T05-34-57.596146/results.json +++ b/baichuan-inc/Baichuan2-13B-Chat/raw_2024-05-27T05-34-57.596146/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.7943921867307393, - "acc,all": 0.7961601307189542, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.4474793720099136, - "mse,all": 1.2170588235294115, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.26286509040333794, - "acc,exam_id__UNICAMP_2018": 0.24074074074074073, - "acc,exam_id__UNICAMP_2023": 0.27906976744186046, - "acc,exam_id__USP_2023": 0.2727272727272727, - "acc,exam_id__UNICAMP_2024": 0.35555555555555557, - "acc,exam_id__USP_2024": 0.17073170731707318, - "acc,exam_id__UNICAMP_2021_1": 0.30434782608695654, - "acc,exam_id__USP_2020": 0.26785714285714285, - "acc,exam_id__UNICAMP_2020": 0.2909090909090909, - "acc,exam_id__UNICAMP_2022": 0.28205128205128205, - "acc,exam_id__UNICAMP_2019": 0.28, - "acc,exam_id__UNICAMP_2021_2": 0.27450980392156865, - "acc,exam_id__USP_2018": 0.25925925925925924, - "acc,exam_id__USP_2021": 0.21153846153846154, - "acc,exam_id__USP_2019": 0.2, - "acc,exam_id__USP_2022": 0.24489795918367346, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 
0.25332400279916023, - "acc,exam_id__2011": 0.24786324786324787, - "acc,exam_id__2017": 0.2672413793103448, - "acc,exam_id__2015": 0.2689075630252101, - "acc,exam_id__2016": 0.2396694214876033, - "acc,exam_id__2016_2": 0.2601626016260163, - "acc,exam_id__2009": 0.2782608695652174, - "acc,exam_id__2012": 0.19827586206896552, - "acc,exam_id__2010": 0.26495726495726496, - "acc,exam_id__2013": 0.24074074074074073, - "acc,exam_id__2014": 0.24770642201834864, - "acc,exam_id__2022": 0.24060150375939848, - "acc,exam_id__2023": 0.2814814814814815 - }, - "faquad_nli": { - "f1_macro,all": 0.3259738358552588, - "acc,all": 0.5046153846153846, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7634894938343515, - "acc,all": 0.7671428571428571 - }, - "oab_exams": { - "acc,all": 0.28792710706150343, - "acc,exam_id__2011-03": 0.24242424242424243, - "acc,exam_id__2014-13": 0.3125, - "acc,exam_id__2013-10": 0.3, - "acc,exam_id__2017-24": 0.3, - "acc,exam_id__2017-22": 0.3, - "acc,exam_id__2012-06a": 0.275, - "acc,exam_id__2016-20a": 0.2375, - "acc,exam_id__2012-09": 0.2857142857142857, - "acc,exam_id__2015-16": 0.2625, - "acc,exam_id__2011-04": 0.2625, - "acc,exam_id__2012-07": 0.3125, - "acc,exam_id__2014-14": 0.325, - "acc,exam_id__2014-15": 0.34615384615384615, - "acc,exam_id__2010-02": 0.27, - "acc,exam_id__2015-18": 0.225, - "acc,exam_id__2016-19": 0.3076923076923077, - "acc,exam_id__2012-06": 0.25, - "acc,exam_id__2013-12": 0.2875, - "acc,exam_id__2011-05": 0.275, - "acc,exam_id__2017-23": 0.25, - "acc,exam_id__2013-11": 0.3375, - "acc,exam_id__2016-20": 0.2625, - "acc,exam_id__2016-21": 0.3, - "acc,exam_id__2018-25": 0.3125, - "acc,exam_id__2010-01": 0.25882352941176473, - "acc,exam_id__2012-08": 0.375, - "acc,exam_id__2015-17": 0.32051282051282054, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.42443666362361115, - "acc,all": 0.4289071680376028 - }, - "tweetsentbr": { - "f1_macro,all": 0.36948384084531244, - "acc,all": 0.5447761194029851, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.7943921867307393, + "acc,all": 0.7961601307189542, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.4474793720099136, + "mse,all": 1.2170588235294115, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.26286509040333794, + "acc,exam_id__UNICAMP_2018": 0.24074074074074073, + "acc,exam_id__UNICAMP_2023": 0.27906976744186046, + "acc,exam_id__USP_2023": 0.2727272727272727, + "acc,exam_id__UNICAMP_2024": 0.35555555555555557, + "acc,exam_id__USP_2024": 0.17073170731707318, + "acc,exam_id__UNICAMP_2021_1": 0.30434782608695654, + "acc,exam_id__USP_2020": 0.26785714285714285, + "acc,exam_id__UNICAMP_2020": 0.2909090909090909, + "acc,exam_id__UNICAMP_2022": 0.28205128205128205, + "acc,exam_id__UNICAMP_2019": 0.28, + "acc,exam_id__UNICAMP_2021_2": 0.27450980392156865, + "acc,exam_id__USP_2018": 0.25925925925925924, + "acc,exam_id__USP_2021": 0.21153846153846154, + "acc,exam_id__USP_2019": 0.2, + "acc,exam_id__USP_2022": 0.24489795918367346, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.25332400279916023, + "acc,exam_id__2011": 0.24786324786324787, + "acc,exam_id__2017": 0.2672413793103448, + "acc,exam_id__2015": 0.2689075630252101, + "acc,exam_id__2016": 0.2396694214876033, + "acc,exam_id__2016_2": 0.2601626016260163, + "acc,exam_id__2009": 0.2782608695652174, + "acc,exam_id__2012": 0.19827586206896552, + "acc,exam_id__2010": 0.26495726495726496, + "acc,exam_id__2013": 0.24074074074074073, + "acc,exam_id__2014": 0.24770642201834864, + "acc,exam_id__2022": 0.24060150375939848, + "acc,exam_id__2023": 0.2814814814814815 + }, + "faquad_nli": { + "f1_macro,all": 0.48896075378288817, + "acc,all": 0.5046153846153846, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7634894938343515, + "acc,all": 0.7671428571428571 + }, + "oab_exams": { + "acc,all": 0.28792710706150343, + "acc,exam_id__2011-03": 0.24242424242424243, + "acc,exam_id__2014-13": 0.3125, + "acc,exam_id__2013-10": 0.3, + "acc,exam_id__2017-24": 0.3, + "acc,exam_id__2017-22": 0.3, + "acc,exam_id__2012-06a": 0.275, + "acc,exam_id__2016-20a": 0.2375, + "acc,exam_id__2012-09": 0.2857142857142857, + "acc,exam_id__2015-16": 0.2625, + "acc,exam_id__2011-04": 0.2625, + "acc,exam_id__2012-07": 0.3125, + "acc,exam_id__2014-14": 0.325, + "acc,exam_id__2014-15": 0.34615384615384615, + "acc,exam_id__2010-02": 0.27, + "acc,exam_id__2015-18": 0.225, + "acc,exam_id__2016-19": 0.3076923076923077, + "acc,exam_id__2012-06": 0.25, + "acc,exam_id__2013-12": 0.2875, + "acc,exam_id__2011-05": 0.275, + "acc,exam_id__2017-23": 0.25, + "acc,exam_id__2013-11": 0.3375, + 
"acc,exam_id__2016-20": 0.2625, + "acc,exam_id__2016-21": 0.3, + "acc,exam_id__2018-25": 0.3125, + "acc,exam_id__2010-01": 0.25882352941176473, + "acc,exam_id__2012-08": 0.375, + "acc,exam_id__2015-17": 0.32051282051282054, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.42443666362361115, + "acc,all": 0.4289071680376028 + }, + "tweetsentbr": { + "f1_macro,all": 0.49264512112708325, + "acc,all": 0.5447761194029851, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 5, - "non_truncated": 14145, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 5, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "c8d877c7ca596d9aeff429d43bff06e288684f45", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 27793336320, - "model_num_parameters": 13896668160, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1393.9803921568628, - "min_seq_length": 1370, - "max_seq_length": 1463, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1629.9803921568628, - "min_seq_length": 1606, - "max_seq_length": 1699, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 3, - "non_truncated": 716, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 3, - "mean_seq_length": 1799.5243393602225, - "min_seq_length": 1406, - "max_seq_length": 2648, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.995827538247566 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - "mean_seq_length": 
1713.3477956613017, - "min_seq_length": 1431, - "max_seq_length": 2744, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1638.336923076923, - "min_seq_length": 1580, - "max_seq_length": 1762, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 5, + "non_truncated": 14145, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 5, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "c8d877c7ca596d9aeff429d43bff06e288684f45", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 27793336320, + "model_num_parameters": 13896668160, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1271.5492857142858, - "min_seq_length": 1248, - "max_seq_length": 1548, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1469.0100227790433, - "min_seq_length": 1181, - "max_seq_length": 2022, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1393.9803921568628, + "min_seq_length": 1370, + "max_seq_length": 1463, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1629.9803921568628, + "min_seq_length": 1606, + "max_seq_length": 1699, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 3, + "non_truncated": 716, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 3, + "mean_seq_length": 1799.5243393602225, + "min_seq_length": 1406, + "max_seq_length": 2648, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.995827538247566 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1713.3477956613017, + "min_seq_length": 1431, + "max_seq_length": 2744, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1638.336923076923, + "min_seq_length": 1580, + "max_seq_length": 1762, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1271.5492857142858, + "min_seq_length": 1248, + "max_seq_length": 1548, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1469.0100227790433, + "min_seq_length": 1181, + "max_seq_length": 2022, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1810.5064629847238, + "min_seq_length": 1774, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1566.1213930348258, + "min_seq_length": 1545, + "max_seq_length": 1619, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1810.5064629847238, - "min_seq_length": 1774, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=baichuan-inc/Baichuan2-13B-Chat,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1566.1213930348258, - "min_seq_length": 1545, - "max_seq_length": 1619, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=baichuan-inc/Baichuan2-13B-Chat,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": 
null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/baichuan-inc/Baichuan2-13B-Chat/results_2024-05-27T05-34-57.596146.json b/baichuan-inc/Baichuan2-13B-Chat/results_2024-05-27T05-34-57.596146.json index 853706de9a6357a5d55f06130179c51a37e26f62..02efa4e9db32bdc7bd2b6e1be08a10f74d7cf3e7 100644 --- a/baichuan-inc/Baichuan2-13B-Chat/results_2024-05-27T05-34-57.596146.json +++ b/baichuan-inc/Baichuan2-13B-Chat/results_2024-05-27T05-34-57.596146.json @@ -34,18 +34,18 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.4365968436847987, - "all_grouped_npm": 0.16081228032932293, + "all_grouped_average": 0.4683910879302876, + "all_grouped_npm": 0.2144660602981052, "all_grouped": { "enem_challenge": 0.25332400279916023, "bluex": 0.26286509040333794, "oab_exams": 0.28792710706150343, "assin2_rte": 0.7943921867307393, "assin2_sts": 0.4474793720099136, - "faquad_nli": 0.3259738358552588, + "faquad_nli": 0.48896075378288817, "hatebr_offensive": 0.7634894938343515, "portuguese_hate_speech": 0.42443666362361115, - "tweetsentbr": 0.36948384084531244 + "tweetsentbr": 0.49264512112708325 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.25332400279916023, @@ -53,10 +53,10 @@ "harness|oab_exams|oab_exams|None|3": 0.28792710706150343, "harness|assin2_rte|assin2_rte|None|15": 0.7943921867307393, "harness|assin2_sts|assin2_sts|None|15": 0.4474793720099136, - "harness|faquad_nli|faquad_nli|None|15": 0.3259738358552588, + "harness|faquad_nli|faquad_nli|None|15": 0.48896075378288817, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7634894938343515, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.42443666362361115, - "harness|tweetsentbr|tweetsentbr|None|25": 0.36948384084531244 + "harness|tweetsentbr|tweetsentbr|None|25": 0.49264512112708325 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.25332400279916023, @@ -135,9 +135,9 @@ "main_score": 0.4474793720099136 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.3259738358552588, + "f1_macro,all": 0.48896075378288817, "acc,all": 0.5046153846153846, - "main_score": 0.3259738358552588 + "main_score": 0.48896075378288817 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { "f1_macro,all": 0.7634894938343515, @@ -150,9 +150,9 @@ "main_score": 0.42443666362361115 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.36948384084531244, + "f1_macro,all": 0.49264512112708325, "acc,all": 0.5447761194029851, - "main_score": 0.36948384084531244 + "main_score": 0.49264512112708325 } }, "config_tasks": { diff --git a/bardsai/jaskier-7b-dpo-v5.6/raw_2024-02-26T22-47-54.215589/results.json b/bardsai/jaskier-7b-dpo-v5.6/raw_2024-02-26T22-47-54.215589/results.json index 3958b09f8d8fcf39893593a586879a7711133432..57c0ed9d52655c7980eb228cb9501b88dda4c0f7 100644 --- a/bardsai/jaskier-7b-dpo-v5.6/raw_2024-02-26T22-47-54.215589/results.json +++ b/bardsai/jaskier-7b-dpo-v5.6/raw_2024-02-26T22-47-54.215589/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9227847245691112, - "acc,all": 0.9227941176470589, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7770773139641775, - "mse,all": 0.428693222630719, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5424200278164116, - "acc,exam_id__USP_2024": 0.7560975609756098, - "acc,exam_id__USP_2020": 0.5, - "acc,exam_id__USP_2018": 0.46296296296296297, - "acc,exam_id__UNICAMP_2018": 0.5555555555555556, - 
"acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, - "acc,exam_id__USP_2023": 0.5909090909090909, - "acc,exam_id__UNICAMP_2022": 0.5897435897435898, - "acc,exam_id__UNICAMP_2019": 0.56, - "acc,exam_id__UNICAMP_2024": 0.5333333333333333, - "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, - "acc,exam_id__UNICAMP_2020": 0.5636363636363636, - "acc,exam_id__USP_2022": 0.4897959183673469, - "acc,exam_id__USP_2021": 0.4807692307692308, - "acc,exam_id__UNICAMP_2023": 0.627906976744186, - "acc,exam_id__USP_2019": 0.45, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6340097970608818, - "acc,exam_id__2011": 0.6581196581196581, - "acc,exam_id__2016": 0.5785123966942148, - "acc,exam_id__2012": 0.6120689655172413, - "acc,exam_id__2015": 0.6134453781512605, - "acc,exam_id__2010": 0.6837606837606838, - "acc,exam_id__2023": 0.6370370370370371, - "acc,exam_id__2017": 0.6724137931034483, - "acc,exam_id__2013": 0.6759259259259259, - "acc,exam_id__2014": 0.6146788990825688, - "acc,exam_id__2009": 0.6434782608695652, - "acc,exam_id__2016_2": 0.6178861788617886, - "acc,exam_id__2022": 0.6090225563909775 - }, - "faquad_nli": { - "f1_macro,all": 0.7831328616487405, - "acc,all": 0.8507692307692307, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8099141363386402, - "acc,all": 0.815 - }, - "oab_exams": { - "acc,all": 0.41594533029612757, - "acc,exam_id__2012-07": 0.35, - "acc,exam_id__2012-09": 0.3246753246753247, - "acc,exam_id__2015-16": 0.3625, - "acc,exam_id__2011-03": 0.3333333333333333, - "acc,exam_id__2010-02": 0.42, - "acc,exam_id__2012-08": 0.4125, - "acc,exam_id__2012-06a": 0.375, - "acc,exam_id__2014-15": 0.48717948717948717, - "acc,exam_id__2017-22": 0.5375, - "acc,exam_id__2016-19": 0.5128205128205128, - "acc,exam_id__2010-01": 0.36470588235294116, - "acc,exam_id__2016-20a": 0.35, - "acc,exam_id__2014-14": 0.525, - "acc,exam_id__2014-13": 0.3375, - "acc,exam_id__2016-20": 0.3625, - "acc,exam_id__2015-17": 0.5128205128205128, - "acc,exam_id__2011-05": 0.45, - "acc,exam_id__2013-11": 0.475, - "acc,exam_id__2012-06": 0.4875, - "acc,exam_id__2013-12": 0.4375, - "acc,exam_id__2011-04": 0.4, - "acc,exam_id__2013-10": 0.4125, - "acc,exam_id__2017-24": 0.3625, - "acc,exam_id__2016-21": 0.3875, - "acc,exam_id__2017-23": 0.4375, - "acc,exam_id__2015-18": 0.4, - "acc,exam_id__2018-25": 0.4375, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.701880824433712, - "acc,all": 0.7579318448883666 - }, - "tweetsentbr": { - "f1_macro,all": 0.4946921882690798, - "acc,all": 0.7074626865671642, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9227847245691112, + "acc,all": 0.9227941176470589, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7770773139641775, + "mse,all": 0.428693222630719, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5424200278164116, + "acc,exam_id__USP_2024": 0.7560975609756098, + "acc,exam_id__USP_2020": 0.5, + "acc,exam_id__USP_2018": 0.46296296296296297, + "acc,exam_id__UNICAMP_2018": 0.5555555555555556, + "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, + "acc,exam_id__USP_2023": 0.5909090909090909, + "acc,exam_id__UNICAMP_2022": 0.5897435897435898, + "acc,exam_id__UNICAMP_2019": 0.56, + "acc,exam_id__UNICAMP_2024": 0.5333333333333333, + "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, + "acc,exam_id__UNICAMP_2020": 0.5636363636363636, + "acc,exam_id__USP_2022": 0.4897959183673469, + "acc,exam_id__USP_2021": 0.4807692307692308, + "acc,exam_id__UNICAMP_2023": 0.627906976744186, + "acc,exam_id__USP_2019": 0.45, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6340097970608818, + "acc,exam_id__2011": 0.6581196581196581, + "acc,exam_id__2016": 0.5785123966942148, + "acc,exam_id__2012": 0.6120689655172413, + "acc,exam_id__2015": 0.6134453781512605, + "acc,exam_id__2010": 0.6837606837606838, + "acc,exam_id__2023": 0.6370370370370371, + "acc,exam_id__2017": 0.6724137931034483, + "acc,exam_id__2013": 0.6759259259259259, + "acc,exam_id__2014": 0.6146788990825688, + "acc,exam_id__2009": 0.6434782608695652, + "acc,exam_id__2016_2": 0.6178861788617886, + "acc,exam_id__2022": 0.6090225563909775 + }, + "faquad_nli": { + "f1_macro,all": 0.7831328616487405, + "acc,all": 0.8507692307692307, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8099141363386402, + "acc,all": 0.815 + }, + "oab_exams": { + "acc,all": 0.41594533029612757, + "acc,exam_id__2012-07": 0.35, + "acc,exam_id__2012-09": 0.3246753246753247, + "acc,exam_id__2015-16": 0.3625, + "acc,exam_id__2011-03": 0.3333333333333333, + "acc,exam_id__2010-02": 0.42, + "acc,exam_id__2012-08": 0.4125, + "acc,exam_id__2012-06a": 0.375, + "acc,exam_id__2014-15": 0.48717948717948717, + "acc,exam_id__2017-22": 0.5375, + "acc,exam_id__2016-19": 0.5128205128205128, + "acc,exam_id__2010-01": 0.36470588235294116, + "acc,exam_id__2016-20a": 0.35, + "acc,exam_id__2014-14": 0.525, + "acc,exam_id__2014-13": 0.3375, + "acc,exam_id__2016-20": 0.3625, + "acc,exam_id__2015-17": 0.5128205128205128, + "acc,exam_id__2011-05": 0.45, + "acc,exam_id__2013-11": 0.475, + "acc,exam_id__2012-06": 0.4875, + "acc,exam_id__2013-12": 0.4375, + "acc,exam_id__2011-04": 0.4, + 
"acc,exam_id__2013-10": 0.4125, + "acc,exam_id__2017-24": 0.3625, + "acc,exam_id__2016-21": 0.3875, + "acc,exam_id__2017-23": 0.4375, + "acc,exam_id__2015-18": 0.4, + "acc,exam_id__2018-25": 0.4375, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.701880824433712, + "acc,all": 0.7579318448883666 + }, + "tweetsentbr": { + "f1_macro,all": 0.659589584358773, + "acc,all": 0.7074626865671642, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "479916ec1f7c46bbc61d1b8d48efde894a34aba1", - 
"model_dtype": "torch.bfloat16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 4096, - "max_ctx_length": 4064, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1620.039188243527, - "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "479916ec1f7c46bbc61d1b8d48efde894a34aba1", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + 
"max_length": 4096, + "max_ctx_length": 4064, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + 
"fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=bardsai/jaskier-7b-dpo-v5.6,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=bardsai/jaskier-7b-dpo-v5.6,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "804df15" + "git_hash": "804df15" } \ No newline at end of file diff --git a/bardsai/jaskier-7b-dpo-v5.6/raw_2024-04-19T00-41-39.307543/results.json b/bardsai/jaskier-7b-dpo-v5.6/raw_2024-04-19T00-41-39.307543/results.json index 427656f8c29f526af218a1fd102159ffc85a4ce2..4b21ab4443df0166dae50c9aa217bedcc9748d4c 100644 --- a/bardsai/jaskier-7b-dpo-v5.6/raw_2024-04-19T00-41-39.307543/results.json +++ b/bardsai/jaskier-7b-dpo-v5.6/raw_2024-04-19T00-41-39.307543/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9223754656270444, - "acc,all": 0.9223856209150327, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.776373064941006, - "mse,all": 0.43125858210784307, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5396383866481224, - "acc,exam_id__USP_2018": 0.46296296296296297, - "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, - "acc,exam_id__UNICAMP_2019": 0.56, - "acc,exam_id__UNICAMP_2020": 0.5636363636363636, - "acc,exam_id__UNICAMP_2024": 0.5333333333333333, - "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, - "acc,exam_id__USP_2019": 0.45, - "acc,exam_id__USP_2024": 0.7560975609756098, - "acc,exam_id__UNICAMP_2018": 0.5370370370370371, - "acc,exam_id__UNICAMP_2023": 0.627906976744186, - "acc,exam_id__USP_2021": 0.4807692307692308, - "acc,exam_id__UNICAMP_2022": 0.5897435897435898, - "acc,exam_id__USP_2022": 0.46938775510204084, - "acc,exam_id__USP_2023": 0.5909090909090909, - "acc,exam_id__USP_2020": 0.5, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6312106368089573, - "acc,exam_id__2009": 0.6521739130434783, - "acc,exam_id__2010": 0.6837606837606838, - "acc,exam_id__2022": 
0.6090225563909775, - "acc,exam_id__2013": 0.6759259259259259, - "acc,exam_id__2014": 0.6146788990825688, - "acc,exam_id__2016": 0.5702479338842975, - "acc,exam_id__2016_2": 0.6016260162601627, - "acc,exam_id__2015": 0.6134453781512605, - "acc,exam_id__2023": 0.6370370370370371, - "acc,exam_id__2017": 0.6551724137931034, - "acc,exam_id__2012": 0.6120689655172413, - "acc,exam_id__2011": 0.6581196581196581 - }, - "faquad_nli": { - "f1_macro,all": 0.7886569534429237, - "acc,all": 0.8538461538461538, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8114721156360248, - "acc,all": 0.8164285714285714 - }, - "oab_exams": { - "acc,all": 0.4154897494305239, - "acc,exam_id__2011-03": 0.3434343434343434, - "acc,exam_id__2015-18": 0.3875, - "acc,exam_id__2012-08": 0.4125, - "acc,exam_id__2016-20": 0.3625, - "acc,exam_id__2014-14": 0.5, - "acc,exam_id__2014-15": 0.48717948717948717, - "acc,exam_id__2013-10": 0.4125, - "acc,exam_id__2016-20a": 0.3375, - "acc,exam_id__2018-25": 0.45, - "acc,exam_id__2010-01": 0.3764705882352941, - "acc,exam_id__2017-24": 0.35, - "acc,exam_id__2012-09": 0.33766233766233766, - "acc,exam_id__2016-19": 0.48717948717948717, - "acc,exam_id__2012-06a": 0.375, - "acc,exam_id__2017-23": 0.4375, - "acc,exam_id__2013-11": 0.475, - "acc,exam_id__2012-06": 0.4875, - "acc,exam_id__2015-17": 0.5128205128205128, - "acc,exam_id__2014-13": 0.35, - "acc,exam_id__2011-05": 0.45, - "acc,exam_id__2010-02": 0.42, - "acc,exam_id__2013-12": 0.4375, - "acc,exam_id__2012-07": 0.35, - "acc,exam_id__2015-16": 0.3625, - "acc,exam_id__2017-22": 0.55, - "acc,exam_id__2011-04": 0.4, - "acc,exam_id__2016-21": 0.3875, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7015527450465484, - "acc,all": 0.7567567567567568 - }, - "tweetsentbr": { - "f1_macro,all": 0.5115777023800963, - "acc,all": 0.7139303482587065, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9223754656270444, + "acc,all": 0.9223856209150327, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.776373064941006, + "mse,all": 0.43125858210784307, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5396383866481224, + "acc,exam_id__USP_2018": 0.46296296296296297, + "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, + "acc,exam_id__UNICAMP_2019": 0.56, + "acc,exam_id__UNICAMP_2020": 0.5636363636363636, + "acc,exam_id__UNICAMP_2024": 0.5333333333333333, + "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, + "acc,exam_id__USP_2019": 0.45, + "acc,exam_id__USP_2024": 0.7560975609756098, + "acc,exam_id__UNICAMP_2018": 0.5370370370370371, + "acc,exam_id__UNICAMP_2023": 0.627906976744186, + "acc,exam_id__USP_2021": 0.4807692307692308, + "acc,exam_id__UNICAMP_2022": 0.5897435897435898, + "acc,exam_id__USP_2022": 0.46938775510204084, + "acc,exam_id__USP_2023": 0.5909090909090909, + "acc,exam_id__USP_2020": 0.5, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6312106368089573, + "acc,exam_id__2009": 0.6521739130434783, + "acc,exam_id__2010": 0.6837606837606838, + "acc,exam_id__2022": 0.6090225563909775, + "acc,exam_id__2013": 0.6759259259259259, + "acc,exam_id__2014": 0.6146788990825688, + "acc,exam_id__2016": 0.5702479338842975, + "acc,exam_id__2016_2": 0.6016260162601627, + "acc,exam_id__2015": 0.6134453781512605, + "acc,exam_id__2023": 0.6370370370370371, + "acc,exam_id__2017": 0.6551724137931034, + "acc,exam_id__2012": 0.6120689655172413, + "acc,exam_id__2011": 0.6581196581196581 + }, + "faquad_nli": { + "f1_macro,all": 0.7886569534429237, + "acc,all": 0.8538461538461538, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8114721156360248, + "acc,all": 0.8164285714285714 + }, + "oab_exams": { + "acc,all": 0.4154897494305239, + "acc,exam_id__2011-03": 0.3434343434343434, + "acc,exam_id__2015-18": 0.3875, + "acc,exam_id__2012-08": 0.4125, + "acc,exam_id__2016-20": 0.3625, + "acc,exam_id__2014-14": 0.5, + "acc,exam_id__2014-15": 0.48717948717948717, + "acc,exam_id__2013-10": 0.4125, + "acc,exam_id__2016-20a": 0.3375, + "acc,exam_id__2018-25": 0.45, + "acc,exam_id__2010-01": 0.3764705882352941, + "acc,exam_id__2017-24": 0.35, + "acc,exam_id__2012-09": 0.33766233766233766, + "acc,exam_id__2016-19": 0.48717948717948717, + "acc,exam_id__2012-06a": 0.375, + "acc,exam_id__2017-23": 0.4375, + "acc,exam_id__2013-11": 0.475, + "acc,exam_id__2012-06": 0.4875, + "acc,exam_id__2015-17": 0.5128205128205128, + "acc,exam_id__2014-13": 0.35, + "acc,exam_id__2011-05": 0.45, + "acc,exam_id__2010-02": 0.42, + 
"acc,exam_id__2013-12": 0.4375, + "acc,exam_id__2012-07": 0.35, + "acc,exam_id__2015-16": 0.3625, + "acc,exam_id__2017-22": 0.55, + "acc,exam_id__2011-04": 0.4, + "acc,exam_id__2016-21": 0.3875, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7015527450465484, + "acc,all": 0.7567567567567568 + }, + "tweetsentbr": { + "f1_macro,all": 0.6821036031734615, + "acc,all": 0.7139303482587065, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "479916ec1f7c46bbc61d1b8d48efde894a34aba1", - "model_dtype": "torch.float16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 1620.039188243527, - 
"min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "479916ec1f7c46bbc61d1b8d48efde894a34aba1", + "model_dtype": "torch.float16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1724.2492537313433, + "min_seq_length": 1703, + "max_seq_length": 1819, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=bardsai/jaskier-7b-dpo-v5.6,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1724.2492537313433, - "min_seq_length": 1703, - "max_seq_length": 1819, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=bardsai/jaskier-7b-dpo-v5.6,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "0e4d6ae" 
+ "git_hash": "0e4d6ae" } \ No newline at end of file diff --git a/bardsai/jaskier-7b-dpo-v5.6/results_2024-02-26T22-47-54.215589.json b/bardsai/jaskier-7b-dpo-v5.6/results_2024-02-26T22-47-54.215589.json index bddf13bd6b60f295c1c04a5725b253bf6c5d9a52..0c4abe11067058f7818203b56f6e91a748560017 100644 --- a/bardsai/jaskier-7b-dpo-v5.6/results_2024-02-26T22-47-54.215589.json +++ b/bardsai/jaskier-7b-dpo-v5.6/results_2024-02-26T22-47-54.215589.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6757619115996536, - "all_grouped_npm": 0.5214463627134215, + "all_grouped_average": 0.6940838444985084, + "all_grouped_npm": 0.5487111438129078, "all_grouped": { "enem_challenge": 0.6340097970608818, "bluex": 0.5424200278164116, @@ -45,7 +45,7 @@ "faquad_nli": 0.7831328616487405, "hatebr_offensive": 0.8099141363386402, "portuguese_hate_speech": 0.701880824433712, - "tweetsentbr": 0.4946921882690798 + "tweetsentbr": 0.659589584358773 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6340097970608818, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7831328616487405, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8099141363386402, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.701880824433712, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4946921882690798 + "harness|tweetsentbr|tweetsentbr|None|25": 0.659589584358773 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6340097970608818, @@ -150,9 +150,9 @@ "main_score": 0.701880824433712 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4946921882690798, + "f1_macro,all": 0.659589584358773, "acc,all": 0.7074626865671642, - "main_score": 0.4946921882690798 + "main_score": 0.659589584358773 } }, "config_tasks": { diff --git a/bardsai/jaskier-7b-dpo-v5.6/results_2024-04-19T00-41-39.307543.json b/bardsai/jaskier-7b-dpo-v5.6/results_2024-04-19T00-41-39.307543.json index 3facc1f4bd9e5c578bb1dc40414336e8669eec49..4f185b68fa601667695949a44e0927d47d02be9f 100644 --- a/bardsai/jaskier-7b-dpo-v5.6/results_2024-04-19T00-41-39.307543.json +++ b/bardsai/jaskier-7b-dpo-v5.6/results_2024-04-19T00-41-39.307543.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6775940911068052, - "all_grouped_npm": 0.524618552550896, + "all_grouped_average": 0.696541413417179, + "all_grouped_npm": 0.5528139726556192, "all_grouped": { "enem_challenge": 0.6312106368089573, "bluex": 0.5396383866481224, @@ -45,7 +45,7 @@ "faquad_nli": 0.7886569534429237, "hatebr_offensive": 0.8114721156360248, "portuguese_hate_speech": 0.7015527450465484, - "tweetsentbr": 0.5115777023800963 + "tweetsentbr": 0.6821036031734615 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6312106368089573, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7886569534429237, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8114721156360248, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7015527450465484, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5115777023800963 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6821036031734615 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6312106368089573, @@ -150,9 +150,9 @@ "main_score": 0.7015527450465484 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5115777023800963, + "f1_macro,all": 0.6821036031734615, "acc,all": 0.7139303482587065, - "main_score": 0.5115777023800963 + "main_score": 0.6821036031734615 } }, "config_tasks": { diff --git 
a/berkeley-nest/Starling-LM-7B-alpha/raw_2024-03-08T14-01-57.709558/results.json b/berkeley-nest/Starling-LM-7B-alpha/raw_2024-03-08T14-01-57.709558/results.json index c11d7251fa53ec49f621e933157fa017939bd2df..c550ad1101825d50466041022d8a570dbe3021cd 100644 --- a/berkeley-nest/Starling-LM-7B-alpha/raw_2024-03-08T14-01-57.709558/results.json +++ b/berkeley-nest/Starling-LM-7B-alpha/raw_2024-03-08T14-01-57.709558/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.927284475393028, - "acc,all": 0.9272875816993464, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.8013588331495842, - "mse,all": 0.4020098039215686, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5563282336578581, - "acc,exam_id__USP_2024": 0.7317073170731707, - "acc,exam_id__USP_2022": 0.6122448979591837, - "acc,exam_id__USP_2023": 0.6136363636363636, - "acc,exam_id__USP_2020": 0.5357142857142857, - "acc,exam_id__UNICAMP_2020": 0.5636363636363636, - "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, - "acc,exam_id__UNICAMP_2019": 0.54, - "acc,exam_id__UNICAMP_2023": 0.5581395348837209, - "acc,exam_id__USP_2019": 0.5, - "acc,exam_id__UNICAMP_2024": 0.5111111111111111, - "acc,exam_id__UNICAMP_2022": 0.6410256410256411, - "acc,exam_id__USP_2021": 0.5384615384615384, - "acc,exam_id__USP_2018": 0.5, - "acc,exam_id__UNICAMP_2021_2": 0.4117647058823529, - "acc,exam_id__UNICAMP_2018": 0.5740740740740741, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6403079076277117, - "acc,exam_id__2012": 0.6206896551724138, - "acc,exam_id__2009": 0.6173913043478261, - "acc,exam_id__2014": 0.6146788990825688, - "acc,exam_id__2016_2": 0.6666666666666666, - "acc,exam_id__2010": 0.6239316239316239, - "acc,exam_id__2011": 0.717948717948718, - "acc,exam_id__2013": 0.6759259259259259, - "acc,exam_id__2016": 0.5785123966942148, - "acc,exam_id__2023": 0.6518518518518519, - "acc,exam_id__2017": 0.646551724137931, - "acc,exam_id__2022": 0.6240601503759399, - "acc,exam_id__2015": 0.6470588235294118 - }, - "faquad_nli": { - "f1_macro,all": 0.784159073392008, - "acc,all": 0.8323076923076923, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8222299935886227, - "acc,all": 0.8257142857142857 - }, - "oab_exams": { - "acc,all": 0.4218678815489749, - "acc,exam_id__2017-24": 0.45, - "acc,exam_id__2012-06": 0.4625, - "acc,exam_id__2012-06a": 0.45, - "acc,exam_id__2015-17": 0.5128205128205128, - "acc,exam_id__2011-04": 0.325, - "acc,exam_id__2017-23": 0.4375, - "acc,exam_id__2016-20a": 0.425, - "acc,exam_id__2011-03": 0.30303030303030304, - "acc,exam_id__2016-19": 0.47435897435897434, - "acc,exam_id__2014-13": 0.3125, - "acc,exam_id__2016-20": 0.375, - "acc,exam_id__2013-10": 0.375, - "acc,exam_id__2015-16": 0.4125, - "acc,exam_id__2013-11": 0.45, - "acc,exam_id__2015-18": 0.45, - "acc,exam_id__2013-12": 0.4875, - "acc,exam_id__2014-15": 0.4358974358974359, - "acc,exam_id__2012-08": 0.4125, - "acc,exam_id__2017-22": 0.45, - "acc,exam_id__2011-05": 0.475, - "acc,exam_id__2012-07": 0.4125, - "acc,exam_id__2012-09": 0.38961038961038963, - "acc,exam_id__2018-25": 0.425, - "acc,exam_id__2010-01": 0.35294117647058826, - "acc,exam_id__2014-14": 0.525, - "acc,exam_id__2016-21": 0.3875, - "acc,exam_id__2010-02": 0.45, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7004364967614757, - "acc,all": 0.7414806110458284 - }, - "tweetsentbr": { - 
"f1_macro,all": 0.45688146565713056, - "acc,all": 0.6786069651741293, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.927284475393028, + "acc,all": 0.9272875816993464, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.8013588331495842, + "mse,all": 0.4020098039215686, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5563282336578581, + "acc,exam_id__USP_2024": 0.7317073170731707, + "acc,exam_id__USP_2022": 0.6122448979591837, + "acc,exam_id__USP_2023": 0.6136363636363636, + "acc,exam_id__USP_2020": 0.5357142857142857, + "acc,exam_id__UNICAMP_2020": 0.5636363636363636, + "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, + "acc,exam_id__UNICAMP_2019": 0.54, + "acc,exam_id__UNICAMP_2023": 0.5581395348837209, + "acc,exam_id__USP_2019": 0.5, + "acc,exam_id__UNICAMP_2024": 0.5111111111111111, + "acc,exam_id__UNICAMP_2022": 0.6410256410256411, + "acc,exam_id__USP_2021": 0.5384615384615384, + "acc,exam_id__USP_2018": 0.5, + "acc,exam_id__UNICAMP_2021_2": 0.4117647058823529, + "acc,exam_id__UNICAMP_2018": 0.5740740740740741, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6403079076277117, + "acc,exam_id__2012": 0.6206896551724138, + "acc,exam_id__2009": 0.6173913043478261, + "acc,exam_id__2014": 0.6146788990825688, + "acc,exam_id__2016_2": 0.6666666666666666, + "acc,exam_id__2010": 0.6239316239316239, + "acc,exam_id__2011": 0.717948717948718, + "acc,exam_id__2013": 0.6759259259259259, + "acc,exam_id__2016": 0.5785123966942148, + "acc,exam_id__2023": 0.6518518518518519, + "acc,exam_id__2017": 0.646551724137931, + "acc,exam_id__2022": 0.6240601503759399, + "acc,exam_id__2015": 0.6470588235294118 + }, + "faquad_nli": { + "f1_macro,all": 0.784159073392008, + "acc,all": 0.8323076923076923, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8222299935886227, + "acc,all": 0.8257142857142857 + }, + "oab_exams": { + "acc,all": 0.4218678815489749, + "acc,exam_id__2017-24": 0.45, + "acc,exam_id__2012-06": 0.4625, + "acc,exam_id__2012-06a": 0.45, + "acc,exam_id__2015-17": 0.5128205128205128, + "acc,exam_id__2011-04": 0.325, + "acc,exam_id__2017-23": 0.4375, + 
"acc,exam_id__2016-20a": 0.425, + "acc,exam_id__2011-03": 0.30303030303030304, + "acc,exam_id__2016-19": 0.47435897435897434, + "acc,exam_id__2014-13": 0.3125, + "acc,exam_id__2016-20": 0.375, + "acc,exam_id__2013-10": 0.375, + "acc,exam_id__2015-16": 0.4125, + "acc,exam_id__2013-11": 0.45, + "acc,exam_id__2015-18": 0.45, + "acc,exam_id__2013-12": 0.4875, + "acc,exam_id__2014-15": 0.4358974358974359, + "acc,exam_id__2012-08": 0.4125, + "acc,exam_id__2017-22": 0.45, + "acc,exam_id__2011-05": 0.475, + "acc,exam_id__2012-07": 0.4125, + "acc,exam_id__2012-09": 0.38961038961038963, + "acc,exam_id__2018-25": 0.425, + "acc,exam_id__2010-01": 0.35294117647058826, + "acc,exam_id__2014-14": 0.525, + "acc,exam_id__2016-21": 0.3875, + "acc,exam_id__2010-02": 0.45, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7004364967614757, + "acc,all": 0.7414806110458284 + }, + "tweetsentbr": { + "f1_macro,all": 0.6091752875428408, + "acc,all": 0.6786069651741293, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"9dc75f55e3b3aed4275ae0dfd71bbbf2c1292ffa", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 14617722880, - "model_num_parameters": 7241748480, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1584.7455065359477, - "min_seq_length": 1561, - "max_seq_length": 1651, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1824.7455065359477, - "min_seq_length": 1801, - "max_seq_length": 1891, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1782.9262865090404, - "min_seq_length": 1406, - "max_seq_length": 2583, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - "mean_seq_length": 1683.039188243527, - "min_seq_length": 1417, - "max_seq_length": 2681, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1825.9876923076922, - "min_seq_length": 1770, - "max_seq_length": 1946, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1676.3878571428572, - "min_seq_length": 1653, - "max_seq_length": 1927, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "9dc75f55e3b3aed4275ae0dfd71bbbf2c1292ffa", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 14617722880, + "model_num_parameters": 7241748480, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": 
null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1428.764464692483, - "min_seq_length": 1162, - "max_seq_length": 1931, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1584.7455065359477, + "min_seq_length": 1561, + "max_seq_length": 1651, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1824.7455065359477, + "min_seq_length": 1801, + "max_seq_length": 1891, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1782.9262865090404, + "min_seq_length": 1406, + "max_seq_length": 2583, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1683.039188243527, + "min_seq_length": 1417, + "max_seq_length": 2681, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1825.9876923076922, + "min_seq_length": 1770, + "max_seq_length": 1946, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1676.3878571428572, + "min_seq_length": 1653, + "max_seq_length": 1927, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1428.764464692483, + "min_seq_length": 1162, + "max_seq_length": 1931, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2177.3360752056406, + "min_seq_length": 2142, + "max_seq_length": 2216, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { 
+ "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1923.2492537313433, + "min_seq_length": 1902, + "max_seq_length": 2018, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2177.3360752056406, - "min_seq_length": 2142, - "max_seq_length": 2216, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=berkeley-nest/Starling-LM-7B-alpha,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1923.2492537313433, - "min_seq_length": 1902, - "max_seq_length": 2018, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=berkeley-nest/Starling-LM-7B-alpha,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": null + "git_hash": null } \ No newline at end of file diff --git a/berkeley-nest/Starling-LM-7B-alpha/results_2024-03-08T14-01-57.709558.json b/berkeley-nest/Starling-LM-7B-alpha/results_2024-03-08T14-01-57.709558.json index f575e6ed32a3c862120e5ac10dfe3f682917e2ce..8b86f2eca270a68629162f55c656223bfef39617 100644 --- a/berkeley-nest/Starling-LM-7B-alpha/results_2024-03-08T14-01-57.709558.json +++ b/berkeley-nest/Starling-LM-7B-alpha/results_2024-03-08T14-01-57.709558.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6789838178640438, - "all_grouped_npm": 0.5252770761531216, + "all_grouped_average": 0.6959053536291226, + "all_grouped_npm": 0.5504579329463939, "all_grouped": { "enem_challenge": 0.6403079076277117, "bluex": 0.5563282336578581, @@ -45,7 +45,7 @@ "faquad_nli": 0.784159073392008, "hatebr_offensive": 0.8222299935886227, "portuguese_hate_speech": 0.7004364967614757, - "tweetsentbr": 0.45688146565713056 + "tweetsentbr": 0.6091752875428408 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6403079076277117, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.784159073392008, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8222299935886227, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7004364967614757, - "harness|tweetsentbr|tweetsentbr|None|25": 0.45688146565713056 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6091752875428408 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6403079076277117, @@ -150,9 +150,9 @@ "main_score": 0.7004364967614757 }, 
"harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.45688146565713056, + "f1_macro,all": 0.6091752875428408, "acc,all": 0.6786069651741293, - "main_score": 0.45688146565713056 + "main_score": 0.6091752875428408 } }, "config_tasks": { diff --git a/chujiezheng/Llama-3-Instruct-8B-SimPO-ExPO/raw_2024-05-30T03-47-51.756674/results.json b/chujiezheng/Llama-3-Instruct-8B-SimPO-ExPO/raw_2024-05-30T03-47-51.756674/results.json index e6507246ddac99371d383e9c36aef55b59434e3c..9db8214e65e17c1a70bb7cb88b6f0a8e18f7f1a5 100644 --- a/chujiezheng/Llama-3-Instruct-8B-SimPO-ExPO/raw_2024-05-30T03-47-51.756674/results.json +++ b/chujiezheng/Llama-3-Instruct-8B-SimPO-ExPO/raw_2024-05-30T03-47-51.756674/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.8989008703317252, - "acc,all": 0.8991013071895425, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7162092421813775, - "mse,all": 1.1332142855473855, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5660639777468707, - "acc,exam_id__UNICAMP_2024": 0.6222222222222222, - "acc,exam_id__USP_2020": 0.5535714285714286, - "acc,exam_id__USP_2018": 0.46296296296296297, - "acc,exam_id__UNICAMP_2018": 0.4444444444444444, - "acc,exam_id__USP_2023": 0.6590909090909091, - "acc,exam_id__UNICAMP_2021_2": 0.47058823529411764, - "acc,exam_id__UNICAMP_2022": 0.6410256410256411, - "acc,exam_id__UNICAMP_2023": 0.5813953488372093, - "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, - "acc,exam_id__UNICAMP_2020": 0.5454545454545454, - "acc,exam_id__USP_2022": 0.6530612244897959, - "acc,exam_id__USP_2021": 0.5576923076923077, - "acc,exam_id__UNICAMP_2019": 0.64, - "acc,exam_id__USP_2024": 0.6585365853658537, - "acc,exam_id__USP_2019": 0.525, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6487053883834849, - "acc,exam_id__2013": 0.7037037037037037, - "acc,exam_id__2014": 0.5963302752293578, - "acc,exam_id__2012": 0.6293103448275862, - "acc,exam_id__2016": 0.6198347107438017, - "acc,exam_id__2016_2": 0.6016260162601627, - "acc,exam_id__2017": 0.646551724137931, - "acc,exam_id__2009": 0.6521739130434783, - "acc,exam_id__2011": 0.6923076923076923, - "acc,exam_id__2010": 0.6923076923076923, - "acc,exam_id__2023": 0.7111111111111111, - "acc,exam_id__2015": 0.6638655462184874, - "acc,exam_id__2022": 0.5789473684210527 - }, - "faquad_nli": { - "f1_macro,all": 0.5873015873015873, - "acc,all": 0.6107692307692307, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8148684191345671, - "acc,all": 0.8185714285714286 - }, - "oab_exams": { - "acc,all": 0.47927107061503416, - "acc,exam_id__2015-16": 0.425, - "acc,exam_id__2012-06a": 0.5375, - "acc,exam_id__2015-17": 0.5769230769230769, - "acc,exam_id__2014-14": 0.5375, - "acc,exam_id__2015-18": 0.4625, - "acc,exam_id__2011-05": 0.4375, - "acc,exam_id__2016-19": 0.6025641025641025, - "acc,exam_id__2011-03": 0.43434343434343436, - "acc,exam_id__2010-01": 0.3764705882352941, - "acc,exam_id__2012-06": 0.5375, - "acc,exam_id__2017-23": 0.4375, - "acc,exam_id__2016-20a": 0.3875, - "acc,exam_id__2010-02": 0.45, - "acc,exam_id__2013-11": 0.525, - "acc,exam_id__2012-09": 0.4025974025974026, - "acc,exam_id__2014-15": 0.5641025641025641, - "acc,exam_id__2017-22": 0.5625, - "acc,exam_id__2016-21": 0.3875, - "acc,exam_id__2011-04": 0.425, - "acc,exam_id__2012-08": 0.4625, - "acc,exam_id__2014-13": 0.3875, - "acc,exam_id__2018-25": 0.5375, - "acc,exam_id__2013-10": 0.5, - "acc,exam_id__2017-24": 
0.45, - "acc,exam_id__2016-20": 0.5375, - "acc,exam_id__2013-12": 0.5, - "acc,exam_id__2012-07": 0.525, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7038037623270075, - "acc,all": 0.7285546415981199 - }, - "tweetsentbr": { - "f1_macro,all": 0.5090739997591226, - "acc,all": 0.7288557213930348, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.8989008703317252, + "acc,all": 0.8991013071895425, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7162092421813775, + "mse,all": 1.1332142855473855, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5660639777468707, + "acc,exam_id__UNICAMP_2024": 0.6222222222222222, + "acc,exam_id__USP_2020": 0.5535714285714286, + "acc,exam_id__USP_2018": 0.46296296296296297, + "acc,exam_id__UNICAMP_2018": 0.4444444444444444, + "acc,exam_id__USP_2023": 0.6590909090909091, + "acc,exam_id__UNICAMP_2021_2": 0.47058823529411764, + "acc,exam_id__UNICAMP_2022": 0.6410256410256411, + "acc,exam_id__UNICAMP_2023": 0.5813953488372093, + "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, + "acc,exam_id__UNICAMP_2020": 0.5454545454545454, + "acc,exam_id__USP_2022": 0.6530612244897959, + "acc,exam_id__USP_2021": 0.5576923076923077, + "acc,exam_id__UNICAMP_2019": 0.64, + "acc,exam_id__USP_2024": 0.6585365853658537, + "acc,exam_id__USP_2019": 0.525, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6487053883834849, + "acc,exam_id__2013": 0.7037037037037037, + "acc,exam_id__2014": 0.5963302752293578, + "acc,exam_id__2012": 0.6293103448275862, + "acc,exam_id__2016": 0.6198347107438017, + "acc,exam_id__2016_2": 0.6016260162601627, + "acc,exam_id__2017": 0.646551724137931, + "acc,exam_id__2009": 0.6521739130434783, + "acc,exam_id__2011": 0.6923076923076923, + "acc,exam_id__2010": 0.6923076923076923, + "acc,exam_id__2023": 0.7111111111111111, + "acc,exam_id__2015": 0.6638655462184874, + "acc,exam_id__2022": 0.5789473684210527 + }, + "faquad_nli": { + "f1_macro,all": 0.5873015873015873, + "acc,all": 0.6107692307692307, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 
0.8148684191345671, + "acc,all": 0.8185714285714286 + }, + "oab_exams": { + "acc,all": 0.47927107061503416, + "acc,exam_id__2015-16": 0.425, + "acc,exam_id__2012-06a": 0.5375, + "acc,exam_id__2015-17": 0.5769230769230769, + "acc,exam_id__2014-14": 0.5375, + "acc,exam_id__2015-18": 0.4625, + "acc,exam_id__2011-05": 0.4375, + "acc,exam_id__2016-19": 0.6025641025641025, + "acc,exam_id__2011-03": 0.43434343434343436, + "acc,exam_id__2010-01": 0.3764705882352941, + "acc,exam_id__2012-06": 0.5375, + "acc,exam_id__2017-23": 0.4375, + "acc,exam_id__2016-20a": 0.3875, + "acc,exam_id__2010-02": 0.45, + "acc,exam_id__2013-11": 0.525, + "acc,exam_id__2012-09": 0.4025974025974026, + "acc,exam_id__2014-15": 0.5641025641025641, + "acc,exam_id__2017-22": 0.5625, + "acc,exam_id__2016-21": 0.3875, + "acc,exam_id__2011-04": 0.425, + "acc,exam_id__2012-08": 0.4625, + "acc,exam_id__2014-13": 0.3875, + "acc,exam_id__2018-25": 0.5375, + "acc,exam_id__2013-10": 0.5, + "acc,exam_id__2017-24": 0.45, + "acc,exam_id__2016-20": 0.5375, + "acc,exam_id__2013-12": 0.5, + "acc,exam_id__2012-07": 0.525, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7038037623270075, + "acc,all": 0.7285546415981199 + }, + "tweetsentbr": { + "f1_macro,all": 0.6787653330121635, + "acc,all": 0.7288557213930348, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "6438f7ba90107e3564c3e3cc6788bf7f8aae376f", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 16060530688, - "model_num_parameters": 8030261248, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1169.5322712418301, - "min_seq_length": 1150, - "max_seq_length": 1233, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1375.5322712418301, - "min_seq_length": 1356, - "max_seq_length": 1439, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1443.769123783032, - "min_seq_length": 1124, - "max_seq_length": 2093, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1371.3547935619315, 
- "min_seq_length": 1146, - "max_seq_length": 2299, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1298.8215384615385, - "min_seq_length": 1253, - "max_seq_length": 1395, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "6438f7ba90107e3564c3e3cc6788bf7f8aae376f", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 16060530688, + "model_num_parameters": 8030261248, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1040.3878571428572, - "min_seq_length": 1020, - "max_seq_length": 1259, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1179.3772209567198, - "min_seq_length": 947, - "max_seq_length": 1613, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1169.5322712418301, + "min_seq_length": 1150, + "max_seq_length": 1233, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1375.5322712418301, + "min_seq_length": 1356, + "max_seq_length": 1439, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1443.769123783032, + "min_seq_length": 1124, + "max_seq_length": 2093, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1371.3547935619315, + "min_seq_length": 1146, + "max_seq_length": 2299, + "max_ctx_length": 2528, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1298.8215384615385, + "min_seq_length": 1253, + "max_seq_length": 1395, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1040.3878571428572, + "min_seq_length": 1020, + "max_seq_length": 1259, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1179.3772209567198, + "min_seq_length": 947, + "max_seq_length": 1613, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1437.4195064629848, + "min_seq_length": 1407, + "max_seq_length": 1469, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1298.1537313432837, + "min_seq_length": 1281, + "max_seq_length": 1346, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1437.4195064629848, - "min_seq_length": 1407, - "max_seq_length": 1469, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=chujiezheng/Llama-3-Instruct-8B-SimPO-ExPO,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1298.1537313432837, - "min_seq_length": 1281, - "max_seq_length": 1346, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=chujiezheng/Llama-3-Instruct-8B-SimPO-ExPO,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - 
"git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/chujiezheng/Llama-3-Instruct-8B-SimPO-ExPO/results_2024-05-30T03-47-51.756674.json b/chujiezheng/Llama-3-Instruct-8B-SimPO-ExPO/results_2024-05-30T03-47-51.756674.json index e7061d6e63b0280ddc0eb0968dfa4a954a69d894..6ab8f4a0257a4b3093d42d3e294525b17cf35c37 100644 --- a/chujiezheng/Llama-3-Instruct-8B-SimPO-ExPO/results_2024-05-30T03-47-51.756674.json +++ b/chujiezheng/Llama-3-Instruct-8B-SimPO-ExPO/results_2024-05-30T03-47-51.756674.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6582442575311975, - "all_grouped_npm": 0.48807895405566953, + "all_grouped_average": 0.6770988501148687, + "all_grouped_npm": 0.5161363834956565, "all_grouped": { "enem_challenge": 0.6487053883834849, "bluex": 0.5660639777468707, @@ -45,7 +45,7 @@ "faquad_nli": 0.5873015873015873, "hatebr_offensive": 0.8148684191345671, "portuguese_hate_speech": 0.7038037623270075, - "tweetsentbr": 0.5090739997591226 + "tweetsentbr": 0.6787653330121635 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6487053883834849, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.5873015873015873, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8148684191345671, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7038037623270075, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5090739997591226 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6787653330121635 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6487053883834849, @@ -150,9 +150,9 @@ "main_score": 0.7038037623270075 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5090739997591226, + "f1_macro,all": 0.6787653330121635, "acc,all": 0.7288557213930348, - "main_score": 0.5090739997591226 + "main_score": 0.6787653330121635 } }, "config_tasks": { diff --git a/cognitivecomputations/WestLake-7B-v2-laser/raw_2024-08-08T01-40-30.831958/results.json b/cognitivecomputations/WestLake-7B-v2-laser/raw_2024-08-08T01-40-30.831958/results.json index 127608b3c1ffdb65f652aa05d41f0e8361cde374..94e2bae840aac46937deacad4d1cd39f341b8b86 100644 --- a/cognitivecomputations/WestLake-7B-v2-laser/raw_2024-08-08T01-40-30.831958/results.json +++ b/cognitivecomputations/WestLake-7B-v2-laser/raw_2024-08-08T01-40-30.831958/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9190722183078603, - "acc,all": 0.9191176470588235, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7771685824919177, - "mse,all": 0.4459272875816994, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5465924895688457, - "acc,exam_id__UNICAMP_2018": 0.5185185185185185, - "acc,exam_id__UNICAMP_2019": 0.56, - "acc,exam_id__UNICAMP_2022": 0.5897435897435898, - "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, - "acc,exam_id__UNICAMP_2020": 0.6363636363636364, - "acc,exam_id__USP_2018": 0.46296296296296297, - "acc,exam_id__UNICAMP_2023": 0.6046511627906976, - "acc,exam_id__USP_2020": 0.5892857142857143, - "acc,exam_id__UNICAMP_2024": 0.4444444444444444, - "acc,exam_id__USP_2022": 0.46938775510204084, - "acc,exam_id__USP_2019": 0.425, - "acc,exam_id__USP_2021": 0.5192307692307693, - "acc,exam_id__USP_2024": 0.7073170731707317, - "acc,exam_id__USP_2023": 0.6136363636363636, - "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6445066480055983, - "acc,exam_id__2013": 0.6944444444444444, - 
"acc,exam_id__2011": 0.6837606837606838, - "acc,exam_id__2014": 0.6513761467889908, - "acc,exam_id__2016": 0.628099173553719, - "acc,exam_id__2016_2": 0.5934959349593496, - "acc,exam_id__2023": 0.6814814814814815, - "acc,exam_id__2022": 0.6090225563909775, - "acc,exam_id__2017": 0.6379310344827587, - "acc,exam_id__2015": 0.5966386554621849, - "acc,exam_id__2010": 0.6923076923076923, - "acc,exam_id__2012": 0.6379310344827587, - "acc,exam_id__2009": 0.6347826086956522 - }, - "faquad_nli": { - "f1_macro,all": 0.7046043452262308, - "acc,all": 0.7369230769230769, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8711919852790839, - "acc,all": 0.8714285714285714 - }, - "oab_exams": { - "acc,all": 0.4218678815489749, - "acc,exam_id__2012-07": 0.375, - "acc,exam_id__2011-04": 0.4, - "acc,exam_id__2012-08": 0.3875, - "acc,exam_id__2017-24": 0.35, - "acc,exam_id__2015-16": 0.3875, - "acc,exam_id__2012-06a": 0.4, - "acc,exam_id__2015-18": 0.4375, - "acc,exam_id__2016-19": 0.5, - "acc,exam_id__2011-05": 0.475, - "acc,exam_id__2010-02": 0.44, - "acc,exam_id__2017-23": 0.4, - "acc,exam_id__2013-11": 0.4875, - "acc,exam_id__2016-20a": 0.35, - "acc,exam_id__2016-20": 0.3875, - "acc,exam_id__2011-03": 0.37373737373737376, - "acc,exam_id__2015-17": 0.5384615384615384, - "acc,exam_id__2014-14": 0.4875, - "acc,exam_id__2013-10": 0.4125, - "acc,exam_id__2017-22": 0.4875, - "acc,exam_id__2014-15": 0.44871794871794873, - "acc,exam_id__2013-12": 0.4125, - "acc,exam_id__2014-13": 0.325, - "acc,exam_id__2012-09": 0.4025974025974026, - "acc,exam_id__2018-25": 0.475, - "acc,exam_id__2012-06": 0.4625, - "acc,exam_id__2016-21": 0.45, - "acc,exam_id__2010-01": 0.35294117647058826, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6580115182691992, - "acc,all": 0.6698002350176263 - }, - "tweetsentbr": { - "f1_macro,all": 0.4870501837530583, - "acc,all": 0.7034825870646766, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9190722183078603, + "acc,all": 0.9191176470588235, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7771685824919177, + "mse,all": 0.4459272875816994, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5465924895688457, + "acc,exam_id__UNICAMP_2018": 0.5185185185185185, + "acc,exam_id__UNICAMP_2019": 0.56, + "acc,exam_id__UNICAMP_2022": 0.5897435897435898, + "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, + "acc,exam_id__UNICAMP_2020": 0.6363636363636364, + "acc,exam_id__USP_2018": 0.46296296296296297, + "acc,exam_id__UNICAMP_2023": 0.6046511627906976, + "acc,exam_id__USP_2020": 0.5892857142857143, + "acc,exam_id__UNICAMP_2024": 0.4444444444444444, + "acc,exam_id__USP_2022": 0.46938775510204084, + "acc,exam_id__USP_2019": 0.425, + "acc,exam_id__USP_2021": 0.5192307692307693, + "acc,exam_id__USP_2024": 0.7073170731707317, + "acc,exam_id__USP_2023": 0.6136363636363636, + "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6445066480055983, + "acc,exam_id__2013": 0.6944444444444444, + "acc,exam_id__2011": 0.6837606837606838, + "acc,exam_id__2014": 0.6513761467889908, + "acc,exam_id__2016": 0.628099173553719, + "acc,exam_id__2016_2": 0.5934959349593496, + "acc,exam_id__2023": 0.6814814814814815, + "acc,exam_id__2022": 0.6090225563909775, + "acc,exam_id__2017": 0.6379310344827587, + "acc,exam_id__2015": 0.5966386554621849, + "acc,exam_id__2010": 0.6923076923076923, + "acc,exam_id__2012": 0.6379310344827587, + "acc,exam_id__2009": 0.6347826086956522 + }, + "faquad_nli": { + "f1_macro,all": 0.7046043452262308, + "acc,all": 0.7369230769230769, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8711919852790839, + "acc,all": 0.8714285714285714 + }, + "oab_exams": { + "acc,all": 0.4218678815489749, + "acc,exam_id__2012-07": 0.375, + "acc,exam_id__2011-04": 0.4, + "acc,exam_id__2012-08": 0.3875, + "acc,exam_id__2017-24": 0.35, + "acc,exam_id__2015-16": 0.3875, + "acc,exam_id__2012-06a": 0.4, + "acc,exam_id__2015-18": 0.4375, + "acc,exam_id__2016-19": 0.5, + "acc,exam_id__2011-05": 0.475, + "acc,exam_id__2010-02": 0.44, + "acc,exam_id__2017-23": 0.4, + "acc,exam_id__2013-11": 0.4875, + "acc,exam_id__2016-20a": 0.35, + "acc,exam_id__2016-20": 0.3875, + "acc,exam_id__2011-03": 0.37373737373737376, + "acc,exam_id__2015-17": 0.5384615384615384, + "acc,exam_id__2014-14": 0.4875, + "acc,exam_id__2013-10": 0.4125, + "acc,exam_id__2017-22": 0.4875, + "acc,exam_id__2014-15": 0.44871794871794873, + "acc,exam_id__2013-12": 0.4125, + "acc,exam_id__2014-13": 0.325, 
+ "acc,exam_id__2012-09": 0.4025974025974026, + "acc,exam_id__2018-25": 0.475, + "acc,exam_id__2012-06": 0.4625, + "acc,exam_id__2016-21": 0.45, + "acc,exam_id__2010-01": 0.35294117647058826, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6580115182691992, + "acc,all": 0.6698002350176263 + }, + "tweetsentbr": { + "f1_macro,all": 0.6494002450040778, + "acc,all": 0.7034825870646766, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "0acaee8266dce7af1b34e8cefd1f2859d9944cd4", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 14483472384, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 1620.039188243527, 
- "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "0acaee8266dce7af1b34e8cefd1f2859d9944cd4", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 14483472384, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=cognitivecomputations/WestLake-7B-v2-laser,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=cognitivecomputations/WestLake-7B-v2-laser,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": 
null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/cognitivecomputations/WestLake-7B-v2-laser/results_2024-08-08T01-40-30.831958.json b/cognitivecomputations/WestLake-7B-v2-laser/results_2024-08-08T01-40-30.831958.json index 1beb51bd05c679ad16d87670065bb0b8ab504423..cc801ab8f0f368c68bb773c771733f1d8a47b502 100644 --- a/cognitivecomputations/WestLake-7B-v2-laser/results_2024-08-08T01-40-30.831958.json +++ b/cognitivecomputations/WestLake-7B-v2-laser/results_2024-08-08T01-40-30.831958.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6700073169389742, - "all_grouped_npm": 0.5105236438265393, + "all_grouped_average": 0.6880462126335319, + "all_grouped_npm": 0.5373672386101072, "all_grouped": { "enem_challenge": 0.6445066480055983, "bluex": 0.5465924895688457, @@ -45,7 +45,7 @@ "faquad_nli": 0.7046043452262308, "hatebr_offensive": 0.8711919852790839, "portuguese_hate_speech": 0.6580115182691992, - "tweetsentbr": 0.4870501837530583 + "tweetsentbr": 0.6494002450040778 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6445066480055983, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7046043452262308, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8711919852790839, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6580115182691992, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4870501837530583 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6494002450040778 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6445066480055983, @@ -150,9 +150,9 @@ "main_score": 0.6580115182691992 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4870501837530583, + "f1_macro,all": 0.6494002450040778, "acc,all": 0.7034825870646766, - "main_score": 0.4870501837530583 + "main_score": 0.6494002450040778 } }, "config_tasks": { diff --git a/cognitivecomputations/dolphin-2.9.3-mistral-7B-32k/raw_2024-07-01T01-31-45.295693/results.json b/cognitivecomputations/dolphin-2.9.3-mistral-7B-32k/raw_2024-07-01T01-31-45.295693/results.json index 9a914511e666b4b7e1b9962468e294dcd7ebfa4b..2a38a8296f81ba488a9ba28d91f8fc986db0dd5b 100644 --- a/cognitivecomputations/dolphin-2.9.3-mistral-7B-32k/raw_2024-07-01T01-31-45.295693/results.json +++ b/cognitivecomputations/dolphin-2.9.3-mistral-7B-32k/raw_2024-07-01T01-31-45.295693/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9072711025673759, - "acc,all": 0.9072712418300654, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7700090320870142, - "mse,all": 0.48628333006535945, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.4937413073713491, - "acc,exam_id__UNICAMP_2018": 0.35185185185185186, - "acc,exam_id__USP_2021": 0.5192307692307693, - "acc,exam_id__USP_2018": 0.46296296296296297, - "acc,exam_id__UNICAMP_2020": 0.509090909090909, - "acc,exam_id__UNICAMP_2024": 0.5333333333333333, - "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, - "acc,exam_id__USP_2024": 0.6341463414634146, - "acc,exam_id__USP_2020": 0.4642857142857143, - "acc,exam_id__UNICAMP_2021_2": 0.49019607843137253, - "acc,exam_id__UNICAMP_2023": 0.5116279069767442, - "acc,exam_id__UNICAMP_2019": 0.48, - "acc,exam_id__USP_2022": 0.4897959183673469, - "acc,exam_id__USP_2019": 0.4, - "acc,exam_id__USP_2023": 0.5454545454545454, - "acc,exam_id__UNICAMP_2022": 0.5384615384615384, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6004198740377886, - 
"acc,exam_id__2022": 0.556390977443609, - "acc,exam_id__2015": 0.5294117647058824, - "acc,exam_id__2011": 0.6837606837606838, - "acc,exam_id__2013": 0.5648148148148148, - "acc,exam_id__2017": 0.603448275862069, - "acc,exam_id__2016_2": 0.6016260162601627, - "acc,exam_id__2012": 0.6551724137931034, - "acc,exam_id__2014": 0.6238532110091743, - "acc,exam_id__2016": 0.6115702479338843, - "acc,exam_id__2010": 0.5384615384615384, - "acc,exam_id__2023": 0.6592592592592592, - "acc,exam_id__2009": 0.5739130434782609 - }, - "faquad_nli": { - "f1_macro,all": 0.740456116464181, - "acc,all": 0.8123076923076923, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8124860915024096, - "acc,all": 0.8164285714285714 - }, - "oab_exams": { - "acc,all": 0.38997722095671983, - "acc,exam_id__2012-07": 0.3, - "acc,exam_id__2013-11": 0.4, - "acc,exam_id__2017-23": 0.4875, - "acc,exam_id__2011-05": 0.45, - "acc,exam_id__2012-06": 0.3875, - "acc,exam_id__2015-16": 0.4, - "acc,exam_id__2010-01": 0.3058823529411765, - "acc,exam_id__2014-14": 0.5, - "acc,exam_id__2016-20a": 0.3125, - "acc,exam_id__2017-22": 0.425, - "acc,exam_id__2015-18": 0.35, - "acc,exam_id__2012-08": 0.3875, - "acc,exam_id__2010-02": 0.4, - "acc,exam_id__2016-20": 0.4, - "acc,exam_id__2012-06a": 0.4, - "acc,exam_id__2014-13": 0.4, - "acc,exam_id__2016-21": 0.4125, - "acc,exam_id__2015-17": 0.4230769230769231, - "acc,exam_id__2017-24": 0.425, - "acc,exam_id__2014-15": 0.46153846153846156, - "acc,exam_id__2013-12": 0.4375, - "acc,exam_id__2016-19": 0.46153846153846156, - "acc,exam_id__2011-04": 0.325, - "acc,exam_id__2018-25": 0.3875, - "acc,exam_id__2011-03": 0.29292929292929293, - "acc,exam_id__2012-09": 0.35064935064935066, - "acc,exam_id__2013-10": 0.275, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6487319880167007, - "acc,all": 0.6627497062279671 - }, - "tweetsentbr": { - "f1_macro,all": 0.48929174712022083, - "acc,all": 0.6985074626865672, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9072711025673759, + "acc,all": 0.9072712418300654, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7700090320870142, + "mse,all": 0.48628333006535945, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.4937413073713491, + "acc,exam_id__UNICAMP_2018": 0.35185185185185186, + "acc,exam_id__USP_2021": 0.5192307692307693, + "acc,exam_id__USP_2018": 0.46296296296296297, + "acc,exam_id__UNICAMP_2020": 0.509090909090909, + "acc,exam_id__UNICAMP_2024": 0.5333333333333333, + "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, + "acc,exam_id__USP_2024": 0.6341463414634146, + "acc,exam_id__USP_2020": 0.4642857142857143, + "acc,exam_id__UNICAMP_2021_2": 0.49019607843137253, + "acc,exam_id__UNICAMP_2023": 0.5116279069767442, + "acc,exam_id__UNICAMP_2019": 0.48, + "acc,exam_id__USP_2022": 0.4897959183673469, + "acc,exam_id__USP_2019": 0.4, + "acc,exam_id__USP_2023": 0.5454545454545454, + "acc,exam_id__UNICAMP_2022": 0.5384615384615384, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6004198740377886, + "acc,exam_id__2022": 0.556390977443609, + "acc,exam_id__2015": 0.5294117647058824, + "acc,exam_id__2011": 0.6837606837606838, + "acc,exam_id__2013": 0.5648148148148148, + "acc,exam_id__2017": 0.603448275862069, + "acc,exam_id__2016_2": 0.6016260162601627, + "acc,exam_id__2012": 0.6551724137931034, + "acc,exam_id__2014": 0.6238532110091743, + "acc,exam_id__2016": 0.6115702479338843, + "acc,exam_id__2010": 0.5384615384615384, + "acc,exam_id__2023": 0.6592592592592592, + "acc,exam_id__2009": 0.5739130434782609 + }, + "faquad_nli": { + "f1_macro,all": 0.740456116464181, + "acc,all": 0.8123076923076923, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8124860915024096, + "acc,all": 0.8164285714285714 + }, + "oab_exams": { + "acc,all": 0.38997722095671983, + "acc,exam_id__2012-07": 0.3, + "acc,exam_id__2013-11": 0.4, + "acc,exam_id__2017-23": 0.4875, + "acc,exam_id__2011-05": 0.45, + "acc,exam_id__2012-06": 0.3875, + "acc,exam_id__2015-16": 0.4, + "acc,exam_id__2010-01": 0.3058823529411765, + "acc,exam_id__2014-14": 0.5, + "acc,exam_id__2016-20a": 0.3125, + "acc,exam_id__2017-22": 0.425, + "acc,exam_id__2015-18": 0.35, + "acc,exam_id__2012-08": 0.3875, + "acc,exam_id__2010-02": 0.4, + "acc,exam_id__2016-20": 0.4, + "acc,exam_id__2012-06a": 0.4, + "acc,exam_id__2014-13": 0.4, + "acc,exam_id__2016-21": 0.4125, + "acc,exam_id__2015-17": 0.4230769230769231, + "acc,exam_id__2017-24": 0.425, + "acc,exam_id__2014-15": 0.46153846153846156, + "acc,exam_id__2013-12": 0.4375, + "acc,exam_id__2016-19": 0.46153846153846156, 
+ "acc,exam_id__2011-04": 0.325, + "acc,exam_id__2018-25": 0.3875, + "acc,exam_id__2011-03": 0.29292929292929293, + "acc,exam_id__2012-09": 0.35064935064935066, + "acc,exam_id__2013-10": 0.275, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6487319880167007, + "acc,all": 0.6627497062279671 + }, + "tweetsentbr": { + "f1_macro,all": 0.6523889961602944, + "acc,all": 0.6985074626865672, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "4f4273ee8e7930dd64e2c6121c79d12546b883e2", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 15032958976, - "model_num_parameters": 7248039936, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1531.7455065359477, - "min_seq_length": 1508, - "max_seq_length": 1598, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1740.7455065359477, - "min_seq_length": 1717, - "max_seq_length": 1807, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1761.9262865090404, - "min_seq_length": 1385, - "max_seq_length": 2562, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1662.039188243527, - "min_seq_length": 1396, - "max_seq_length": 2660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1756.9876923076922, - "min_seq_length": 1701, - "max_seq_length": 1877, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "4f4273ee8e7930dd64e2c6121c79d12546b883e2", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 15032958976, + "model_num_parameters": 7248039936, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1567.3878571428572, - "min_seq_length": 1544, - "max_seq_length": 1818, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1407.764464692483, - "min_seq_length": 1141, - "max_seq_length": 1910, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1531.7455065359477, + "min_seq_length": 1508, + "max_seq_length": 1598, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1740.7455065359477, + "min_seq_length": 1717, + "max_seq_length": 1807, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1761.9262865090404, + "min_seq_length": 1385, + "max_seq_length": 2562, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1662.039188243527, + "min_seq_length": 1396, + 
"max_seq_length": 2660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1756.9876923076922, + "min_seq_length": 1701, + "max_seq_length": 1877, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1567.3878571428572, + "min_seq_length": 1544, + "max_seq_length": 1818, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1407.764464692483, + "min_seq_length": 1141, + "max_seq_length": 1910, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2068.3360752056406, + "min_seq_length": 2033, + "max_seq_length": 2107, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1814.2492537313433, + "min_seq_length": 1793, + "max_seq_length": 1909, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2068.3360752056406, - "min_seq_length": 2033, - "max_seq_length": 2107, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=cognitivecomputations/dolphin-2.9.3-mistral-7B-32k,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1814.2492537313433, - "min_seq_length": 1793, - "max_seq_length": 1909, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=cognitivecomputations/dolphin-2.9.3-mistral-7B-32k,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - 
null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/cognitivecomputations/dolphin-2.9.3-mistral-7B-32k/raw_2024-07-01T01-31-45.855293/results.json b/cognitivecomputations/dolphin-2.9.3-mistral-7B-32k/raw_2024-07-01T01-31-45.855293/results.json index e5a1f764f2ae70c2e448a53fbbd9dfdf9fa7d984..5d6c09bd2d75d6a10b5c49b532a79e422bee3356 100644 --- a/cognitivecomputations/dolphin-2.9.3-mistral-7B-32k/raw_2024-07-01T01-31-45.855293/results.json +++ b/cognitivecomputations/dolphin-2.9.3-mistral-7B-32k/raw_2024-07-01T01-31-45.855293/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9072711025673759, - "acc,all": 0.9072712418300654, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7700090320870142, - "mse,all": 0.48628333006535945, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.4937413073713491, - "acc,exam_id__UNICAMP_2019": 0.48, - "acc,exam_id__UNICAMP_2018": 0.35185185185185186, - "acc,exam_id__UNICAMP_2020": 0.509090909090909, - "acc,exam_id__USP_2020": 0.4642857142857143, - "acc,exam_id__USP_2018": 0.46296296296296297, - "acc,exam_id__USP_2023": 0.5454545454545454, - "acc,exam_id__USP_2019": 0.4, - "acc,exam_id__USP_2021": 0.5192307692307693, - "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, - "acc,exam_id__UNICAMP_2023": 0.5116279069767442, - "acc,exam_id__USP_2024": 0.6341463414634146, - "acc,exam_id__UNICAMP_2024": 0.5333333333333333, - "acc,exam_id__UNICAMP_2021_2": 0.49019607843137253, - "acc,exam_id__UNICAMP_2022": 0.5384615384615384, - "acc,exam_id__USP_2022": 0.4897959183673469, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6004198740377886, - "acc,exam_id__2009": 0.5739130434782609, - "acc,exam_id__2023": 0.6592592592592592, - "acc,exam_id__2013": 0.5648148148148148, - "acc,exam_id__2016": 0.6115702479338843, - "acc,exam_id__2010": 0.5384615384615384, - "acc,exam_id__2016_2": 0.6016260162601627, - "acc,exam_id__2011": 0.6837606837606838, - "acc,exam_id__2014": 0.6238532110091743, - "acc,exam_id__2017": 0.603448275862069, - "acc,exam_id__2022": 0.556390977443609, - "acc,exam_id__2015": 0.5294117647058824, - "acc,exam_id__2012": 0.6551724137931034 - }, - "faquad_nli": { - "f1_macro,all": 0.740456116464181, - "acc,all": 0.8123076923076923, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8124860915024096, - "acc,all": 0.8164285714285714 - }, - "oab_exams": { - "acc,all": 0.38997722095671983, - "acc,exam_id__2014-15": 0.46153846153846156, - "acc,exam_id__2014-14": 0.5, - "acc,exam_id__2011-05": 0.45, - "acc,exam_id__2014-13": 0.4, - "acc,exam_id__2017-23": 0.4875, - "acc,exam_id__2016-20": 0.4, - "acc,exam_id__2017-24": 0.425, - "acc,exam_id__2012-08": 0.3875, - "acc,exam_id__2016-19": 0.46153846153846156, - "acc,exam_id__2015-18": 0.35, - "acc,exam_id__2010-01": 0.3058823529411765, - "acc,exam_id__2013-11": 0.4, - "acc,exam_id__2016-21": 0.4125, - "acc,exam_id__2015-16": 0.4, - "acc,exam_id__2011-04": 0.325, - "acc,exam_id__2010-02": 0.4, - "acc,exam_id__2012-09": 0.35064935064935066, - "acc,exam_id__2011-03": 0.29292929292929293, - "acc,exam_id__2013-10": 0.275, - "acc,exam_id__2013-12": 0.4375, - "acc,exam_id__2012-06": 0.3875, - "acc,exam_id__2012-07": 0.3, - "acc,exam_id__2012-06a": 0.4, - "acc,exam_id__2016-20a": 0.3125, - "acc,exam_id__2018-25": 0.3875, - "acc,exam_id__2017-22": 
0.425, - "acc,exam_id__2015-17": 0.4230769230769231, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6487319880167007, - "acc,all": 0.6627497062279671 - }, - "tweetsentbr": { - "f1_macro,all": 0.48929174712022083, - "acc,all": 0.6985074626865672, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9072711025673759, + "acc,all": 0.9072712418300654, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7700090320870142, + "mse,all": 0.48628333006535945, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.4937413073713491, + "acc,exam_id__UNICAMP_2019": 0.48, + "acc,exam_id__UNICAMP_2018": 0.35185185185185186, + "acc,exam_id__UNICAMP_2020": 0.509090909090909, + "acc,exam_id__USP_2020": 0.4642857142857143, + "acc,exam_id__USP_2018": 0.46296296296296297, + "acc,exam_id__USP_2023": 0.5454545454545454, + "acc,exam_id__USP_2019": 0.4, + "acc,exam_id__USP_2021": 0.5192307692307693, + "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, + "acc,exam_id__UNICAMP_2023": 0.5116279069767442, + "acc,exam_id__USP_2024": 0.6341463414634146, + "acc,exam_id__UNICAMP_2024": 0.5333333333333333, + "acc,exam_id__UNICAMP_2021_2": 0.49019607843137253, + "acc,exam_id__UNICAMP_2022": 0.5384615384615384, + "acc,exam_id__USP_2022": 0.4897959183673469, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6004198740377886, + "acc,exam_id__2009": 0.5739130434782609, + "acc,exam_id__2023": 0.6592592592592592, + "acc,exam_id__2013": 0.5648148148148148, + "acc,exam_id__2016": 0.6115702479338843, + "acc,exam_id__2010": 0.5384615384615384, + "acc,exam_id__2016_2": 0.6016260162601627, + "acc,exam_id__2011": 0.6837606837606838, + "acc,exam_id__2014": 0.6238532110091743, + "acc,exam_id__2017": 0.603448275862069, + "acc,exam_id__2022": 0.556390977443609, + "acc,exam_id__2015": 0.5294117647058824, + "acc,exam_id__2012": 0.6551724137931034 + }, + "faquad_nli": { + "f1_macro,all": 0.740456116464181, + "acc,all": 0.8123076923076923, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8124860915024096, + "acc,all": 0.8164285714285714 + }, + 
"oab_exams": { + "acc,all": 0.38997722095671983, + "acc,exam_id__2014-15": 0.46153846153846156, + "acc,exam_id__2014-14": 0.5, + "acc,exam_id__2011-05": 0.45, + "acc,exam_id__2014-13": 0.4, + "acc,exam_id__2017-23": 0.4875, + "acc,exam_id__2016-20": 0.4, + "acc,exam_id__2017-24": 0.425, + "acc,exam_id__2012-08": 0.3875, + "acc,exam_id__2016-19": 0.46153846153846156, + "acc,exam_id__2015-18": 0.35, + "acc,exam_id__2010-01": 0.3058823529411765, + "acc,exam_id__2013-11": 0.4, + "acc,exam_id__2016-21": 0.4125, + "acc,exam_id__2015-16": 0.4, + "acc,exam_id__2011-04": 0.325, + "acc,exam_id__2010-02": 0.4, + "acc,exam_id__2012-09": 0.35064935064935066, + "acc,exam_id__2011-03": 0.29292929292929293, + "acc,exam_id__2013-10": 0.275, + "acc,exam_id__2013-12": 0.4375, + "acc,exam_id__2012-06": 0.3875, + "acc,exam_id__2012-07": 0.3, + "acc,exam_id__2012-06a": 0.4, + "acc,exam_id__2016-20a": 0.3125, + "acc,exam_id__2018-25": 0.3875, + "acc,exam_id__2017-22": 0.425, + "acc,exam_id__2015-17": 0.4230769230769231, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6487319880167007, + "acc,all": 0.6627497062279671 + }, + "tweetsentbr": { + "f1_macro,all": 0.6523889961602944, + "acc,all": 0.6985074626865672, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "4f4273ee8e7930dd64e2c6121c79d12546b883e2", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 15032958976, - "model_num_parameters": 7248039936, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1531.7455065359477, - "min_seq_length": 1508, - "max_seq_length": 1598, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1740.7455065359477, - "min_seq_length": 1717, - "max_seq_length": 1807, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1761.9262865090404, - "min_seq_length": 1385, - "max_seq_length": 2562, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1662.039188243527, - "min_seq_length": 1396, - "max_seq_length": 2660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1756.9876923076922, - "min_seq_length": 1701, - "max_seq_length": 1877, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "4f4273ee8e7930dd64e2c6121c79d12546b883e2", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 15032958976, + "model_num_parameters": 7248039936, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1567.3878571428572, - "min_seq_length": 1544, - "max_seq_length": 1818, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1407.764464692483, - "min_seq_length": 1141, - "max_seq_length": 1910, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1531.7455065359477, + "min_seq_length": 1508, + "max_seq_length": 1598, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1740.7455065359477, + "min_seq_length": 1717, + "max_seq_length": 1807, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1761.9262865090404, + "min_seq_length": 1385, + "max_seq_length": 2562, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1662.039188243527, + "min_seq_length": 1396, + 
"max_seq_length": 2660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1756.9876923076922, + "min_seq_length": 1701, + "max_seq_length": 1877, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1567.3878571428572, + "min_seq_length": 1544, + "max_seq_length": 1818, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1407.764464692483, + "min_seq_length": 1141, + "max_seq_length": 1910, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2068.3360752056406, + "min_seq_length": 2033, + "max_seq_length": 2107, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1814.2492537313433, + "min_seq_length": 1793, + "max_seq_length": 1909, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2068.3360752056406, - "min_seq_length": 2033, - "max_seq_length": 2107, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=cognitivecomputations/dolphin-2.9.3-mistral-7B-32k,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1814.2492537313433, - "min_seq_length": 1793, - "max_seq_length": 1909, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=cognitivecomputations/dolphin-2.9.3-mistral-7B-32k,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - 
null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/cognitivecomputations/dolphin-2.9.3-mistral-7B-32k/results_2024-07-01T01-31-45.295693.json b/cognitivecomputations/dolphin-2.9.3-mistral-7B-32k/results_2024-07-01T01-31-45.295693.json index 91ee8e4d043e0bb647c19e323075901cee6874d4..84c1be07d16569ce70aa2c863fe14025597035cd 100644 --- a/cognitivecomputations/dolphin-2.9.3-mistral-7B-32k/results_2024-07-01T01-31-45.295693.json +++ b/cognitivecomputations/dolphin-2.9.3-mistral-7B-32k/results_2024-07-01T01-31-45.295693.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6502649422359733, - "all_grouped_npm": 0.48134927282227913, + "all_grouped_average": 0.6683868587959816, + "all_grouped_npm": 0.5083164105603866, "all_grouped": { "enem_challenge": 0.6004198740377886, "bluex": 0.4937413073713491, @@ -45,7 +45,7 @@ "faquad_nli": 0.740456116464181, "hatebr_offensive": 0.8124860915024096, "portuguese_hate_speech": 0.6487319880167007, - "tweetsentbr": 0.48929174712022083 + "tweetsentbr": 0.6523889961602944 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6004198740377886, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.740456116464181, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8124860915024096, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6487319880167007, - "harness|tweetsentbr|tweetsentbr|None|25": 0.48929174712022083 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6523889961602944 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6004198740377886, @@ -150,9 +150,9 @@ "main_score": 0.6487319880167007 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.48929174712022083, + "f1_macro,all": 0.6523889961602944, "acc,all": 0.6985074626865672, - "main_score": 0.48929174712022083 + "main_score": 0.6523889961602944 } }, "config_tasks": { diff --git a/cognitivecomputations/dolphin-2.9.3-mistral-7B-32k/results_2024-07-01T01-31-45.855293.json b/cognitivecomputations/dolphin-2.9.3-mistral-7B-32k/results_2024-07-01T01-31-45.855293.json index a0a034679d39ec462fbee20aa3553099c0c95443..92ad311c8e3d8b3b8be6974951f010de2c7547db 100644 --- a/cognitivecomputations/dolphin-2.9.3-mistral-7B-32k/results_2024-07-01T01-31-45.855293.json +++ b/cognitivecomputations/dolphin-2.9.3-mistral-7B-32k/results_2024-07-01T01-31-45.855293.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6502649422359733, - "all_grouped_npm": 0.48134927282227913, + "all_grouped_average": 0.6683868587959816, + "all_grouped_npm": 0.5083164105603866, "all_grouped": { "enem_challenge": 0.6004198740377886, "bluex": 0.4937413073713491, @@ -45,7 +45,7 @@ "faquad_nli": 0.740456116464181, "hatebr_offensive": 0.8124860915024096, "portuguese_hate_speech": 0.6487319880167007, - "tweetsentbr": 0.48929174712022083 + "tweetsentbr": 0.6523889961602944 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6004198740377886, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.740456116464181, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8124860915024096, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6487319880167007, - "harness|tweetsentbr|tweetsentbr|None|25": 0.48929174712022083 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6523889961602944 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 
0.6004198740377886, @@ -150,9 +150,9 @@ "main_score": 0.6487319880167007 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.48929174712022083, + "f1_macro,all": 0.6523889961602944, "acc,all": 0.6985074626865672, - "main_score": 0.48929174712022083 + "main_score": 0.6523889961602944 } }, "config_tasks": { diff --git a/cognitivecomputations/laserxtral/raw_2024-08-12T01-36-06.572396/results.json b/cognitivecomputations/laserxtral/raw_2024-08-12T01-36-06.572396/results.json index 5c69beb982546843eb1bcf6a86563c0ca8a93dd0..d19bf7c3d088b93fa668aea9a5f5e03953cbb9ce 100644 --- a/cognitivecomputations/laserxtral/raw_2024-08-12T01-36-06.572396/results.json +++ b/cognitivecomputations/laserxtral/raw_2024-08-12T01-36-06.572396/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9231594800094836, - "acc,all": 0.923202614379085, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7829095943476577, - "mse,all": 0.4700857843137255, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5479833101529903, - "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, - "acc,exam_id__UNICAMP_2020": 0.6, - "acc,exam_id__UNICAMP_2023": 0.5581395348837209, - "acc,exam_id__USP_2023": 0.6590909090909091, - "acc,exam_id__UNICAMP_2018": 0.46296296296296297, - "acc,exam_id__UNICAMP_2022": 0.6153846153846154, - "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, - "acc,exam_id__UNICAMP_2024": 0.5111111111111111, - "acc,exam_id__USP_2021": 0.5192307692307693, - "acc,exam_id__USP_2018": 0.48148148148148145, - "acc,exam_id__USP_2020": 0.5535714285714286, - "acc,exam_id__USP_2024": 0.6829268292682927, - "acc,exam_id__USP_2022": 0.4897959183673469, - "acc,exam_id__USP_2019": 0.475, - "acc,exam_id__UNICAMP_2019": 0.66, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6466060181945417, - "acc,exam_id__2009": 0.6086956521739131, - "acc,exam_id__2012": 0.6551724137931034, - "acc,exam_id__2010": 0.6153846153846154, - "acc,exam_id__2022": 0.6616541353383458, - "acc,exam_id__2013": 0.6666666666666666, - "acc,exam_id__2011": 0.6923076923076923, - "acc,exam_id__2015": 0.6386554621848739, - "acc,exam_id__2016_2": 0.6178861788617886, - "acc,exam_id__2017": 0.6724137931034483, - "acc,exam_id__2014": 0.6238532110091743, - "acc,exam_id__2016": 0.628099173553719, - "acc,exam_id__2023": 0.674074074074074 - }, - "faquad_nli": { - "f1_macro,all": 0.766772890921475, - "acc,all": 0.8184615384615385, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8362695115615253, - "acc,all": 0.8385714285714285 - }, - "oab_exams": { - "acc,all": 0.43143507972665146, - "acc,exam_id__2015-17": 0.5, - "acc,exam_id__2013-10": 0.3875, - "acc,exam_id__2018-25": 0.45, - "acc,exam_id__2012-06": 0.4375, - "acc,exam_id__2012-07": 0.4, - "acc,exam_id__2013-12": 0.4625, - "acc,exam_id__2011-03": 0.35353535353535354, - "acc,exam_id__2014-14": 0.55, - "acc,exam_id__2013-11": 0.5, - "acc,exam_id__2011-04": 0.4, - "acc,exam_id__2011-05": 0.4625, - "acc,exam_id__2017-23": 0.4125, - "acc,exam_id__2014-13": 0.325, - "acc,exam_id__2016-19": 0.48717948717948717, - "acc,exam_id__2010-01": 0.3411764705882353, - "acc,exam_id__2017-24": 0.425, - "acc,exam_id__2017-22": 0.575, - "acc,exam_id__2014-15": 0.5, - "acc,exam_id__2010-02": 0.44, - "acc,exam_id__2016-21": 0.4, - "acc,exam_id__2016-20": 0.475, - "acc,exam_id__2015-16": 0.375, - "acc,exam_id__2016-20a": 0.35, - "acc,exam_id__2012-08": 0.375, - "acc,exam_id__2012-09": 
0.33766233766233766, - "acc,exam_id__2012-06a": 0.4375, - "acc,exam_id__2015-18": 0.5125, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.657702979993471, - "acc,all": 0.6756756756756757 - }, - "tweetsentbr": { - "f1_macro,all": 0.4842084222798414, - "acc,all": 0.7009950248756219, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9231594800094836, + "acc,all": 0.923202614379085, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7829095943476577, + "mse,all": 0.4700857843137255, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5479833101529903, + "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, + "acc,exam_id__UNICAMP_2020": 0.6, + "acc,exam_id__UNICAMP_2023": 0.5581395348837209, + "acc,exam_id__USP_2023": 0.6590909090909091, + "acc,exam_id__UNICAMP_2018": 0.46296296296296297, + "acc,exam_id__UNICAMP_2022": 0.6153846153846154, + "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, + "acc,exam_id__UNICAMP_2024": 0.5111111111111111, + "acc,exam_id__USP_2021": 0.5192307692307693, + "acc,exam_id__USP_2018": 0.48148148148148145, + "acc,exam_id__USP_2020": 0.5535714285714286, + "acc,exam_id__USP_2024": 0.6829268292682927, + "acc,exam_id__USP_2022": 0.4897959183673469, + "acc,exam_id__USP_2019": 0.475, + "acc,exam_id__UNICAMP_2019": 0.66, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6466060181945417, + "acc,exam_id__2009": 0.6086956521739131, + "acc,exam_id__2012": 0.6551724137931034, + "acc,exam_id__2010": 0.6153846153846154, + "acc,exam_id__2022": 0.6616541353383458, + "acc,exam_id__2013": 0.6666666666666666, + "acc,exam_id__2011": 0.6923076923076923, + "acc,exam_id__2015": 0.6386554621848739, + "acc,exam_id__2016_2": 0.6178861788617886, + "acc,exam_id__2017": 0.6724137931034483, + "acc,exam_id__2014": 0.6238532110091743, + "acc,exam_id__2016": 0.628099173553719, + "acc,exam_id__2023": 0.674074074074074 + }, + "faquad_nli": { + "f1_macro,all": 0.766772890921475, + "acc,all": 0.8184615384615385, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8362695115615253, + "acc,all": 
0.8385714285714285 + }, + "oab_exams": { + "acc,all": 0.43143507972665146, + "acc,exam_id__2015-17": 0.5, + "acc,exam_id__2013-10": 0.3875, + "acc,exam_id__2018-25": 0.45, + "acc,exam_id__2012-06": 0.4375, + "acc,exam_id__2012-07": 0.4, + "acc,exam_id__2013-12": 0.4625, + "acc,exam_id__2011-03": 0.35353535353535354, + "acc,exam_id__2014-14": 0.55, + "acc,exam_id__2013-11": 0.5, + "acc,exam_id__2011-04": 0.4, + "acc,exam_id__2011-05": 0.4625, + "acc,exam_id__2017-23": 0.4125, + "acc,exam_id__2014-13": 0.325, + "acc,exam_id__2016-19": 0.48717948717948717, + "acc,exam_id__2010-01": 0.3411764705882353, + "acc,exam_id__2017-24": 0.425, + "acc,exam_id__2017-22": 0.575, + "acc,exam_id__2014-15": 0.5, + "acc,exam_id__2010-02": 0.44, + "acc,exam_id__2016-21": 0.4, + "acc,exam_id__2016-20": 0.475, + "acc,exam_id__2015-16": 0.375, + "acc,exam_id__2016-20a": 0.35, + "acc,exam_id__2012-08": 0.375, + "acc,exam_id__2012-09": 0.33766233766233766, + "acc,exam_id__2012-06a": 0.4375, + "acc,exam_id__2015-18": 0.5125, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.657702979993471, + "acc,all": 0.6756756756756757 + }, + "tweetsentbr": { + "f1_macro,all": 0.6456112297064551, + "acc,all": 0.7009950248756219, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "1eb0192a8181eb7ce68c2d2947b3bcb79c02b3c2", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 48844259328, - "model_num_parameters": 24153690112, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 1620.039188243527, 
- "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "1eb0192a8181eb7ce68c2d2947b3bcb79c02b3c2", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 48844259328, + "model_num_parameters": 24153690112, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=cognitivecomputations/laserxtral,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=cognitivecomputations/laserxtral,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - 
"git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/cognitivecomputations/laserxtral/results_2024-08-12T01-36-06.572396.json b/cognitivecomputations/laserxtral/results_2024-08-12T01-36-06.572396.json index 60c868fbf4da1edea041228a1e64adb37c0ffaea..51d1ede82dc4bf549fbf67d06ca9b2044d7aa714 100644 --- a/cognitivecomputations/laserxtral/results_2024-08-12T01-36-06.572396.json +++ b/cognitivecomputations/laserxtral/results_2024-08-12T01-36-06.572396.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6752274763541819, - "all_grouped_npm": 0.5183797628468771, + "all_grouped_average": 0.6931611216238057, + "all_grouped_npm": 0.5450667349742935, "all_grouped": { "enem_challenge": 0.6466060181945417, "bluex": 0.5479833101529903, @@ -45,7 +45,7 @@ "faquad_nli": 0.766772890921475, "hatebr_offensive": 0.8362695115615253, "portuguese_hate_speech": 0.657702979993471, - "tweetsentbr": 0.4842084222798414 + "tweetsentbr": 0.6456112297064551 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6466060181945417, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.766772890921475, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8362695115615253, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.657702979993471, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4842084222798414 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6456112297064551 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6466060181945417, @@ -150,9 +150,9 @@ "main_score": 0.657702979993471 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4842084222798414, + "f1_macro,all": 0.6456112297064551, "acc,all": 0.7009950248756219, - "main_score": 0.4842084222798414 + "main_score": 0.6456112297064551 } }, "config_tasks": { diff --git a/cognitivecomputations/openchat-3.5-0106-laser/raw_2024-08-08T01-36-00.321142/results.json b/cognitivecomputations/openchat-3.5-0106-laser/raw_2024-08-08T01-36-00.321142/results.json index c3934275576ed5499849a60eb062e9c185680d3f..7ebfac304693afb05facc78c746572bb3b2f7b67 100644 --- a/cognitivecomputations/openchat-3.5-0106-laser/raw_2024-08-08T01-36-00.321142/results.json +++ b/cognitivecomputations/openchat-3.5-0106-laser/raw_2024-08-08T01-36-00.321142/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9248076923076923, - "acc,all": 0.9248366013071896, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.8345948997950121, - "mse,all": 0.349142156862745, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5312934631432545, - "acc,exam_id__USP_2018": 0.4444444444444444, - "acc,exam_id__USP_2020": 0.5357142857142857, - "acc,exam_id__UNICAMP_2020": 0.5272727272727272, - "acc,exam_id__UNICAMP_2019": 0.56, - "acc,exam_id__UNICAMP_2022": 0.5897435897435898, - "acc,exam_id__USP_2021": 0.5384615384615384, - "acc,exam_id__USP_2019": 0.45, - "acc,exam_id__USP_2022": 0.6122448979591837, - "acc,exam_id__UNICAMP_2024": 0.5111111111111111, - "acc,exam_id__USP_2023": 0.5681818181818182, - "acc,exam_id__UNICAMP_2018": 0.5, - "acc,exam_id__UNICAMP_2021_1": 0.43478260869565216, - "acc,exam_id__UNICAMP_2023": 0.5348837209302325, - "acc,exam_id__USP_2024": 0.6829268292682927, - "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6452064380685795, - "acc,exam_id__2016_2": 0.6585365853658537, - "acc,exam_id__2016": 0.6033057851239669, - "acc,exam_id__2014": 
0.6146788990825688, - "acc,exam_id__2011": 0.717948717948718, - "acc,exam_id__2015": 0.6218487394957983, - "acc,exam_id__2012": 0.6379310344827587, - "acc,exam_id__2013": 0.6388888888888888, - "acc,exam_id__2010": 0.6324786324786325, - "acc,exam_id__2017": 0.6724137931034483, - "acc,exam_id__2009": 0.6173913043478261, - "acc,exam_id__2023": 0.6666666666666666, - "acc,exam_id__2022": 0.6541353383458647 - }, - "faquad_nli": { - "f1_macro,all": 0.7866754184443715, - "acc,all": 0.8569230769230769, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7732668320172721, - "acc,all": 0.7821428571428571 - }, - "oab_exams": { - "acc,all": 0.4496583143507973, - "acc,exam_id__2013-10": 0.45, - "acc,exam_id__2012-09": 0.4025974025974026, - "acc,exam_id__2010-02": 0.45, - "acc,exam_id__2015-18": 0.4875, - "acc,exam_id__2018-25": 0.475, - "acc,exam_id__2015-17": 0.5641025641025641, - "acc,exam_id__2011-03": 0.35353535353535354, - "acc,exam_id__2016-20": 0.4625, - "acc,exam_id__2011-04": 0.3375, - "acc,exam_id__2012-06a": 0.5, - "acc,exam_id__2012-08": 0.4125, - "acc,exam_id__2011-05": 0.4375, - "acc,exam_id__2014-15": 0.5256410256410257, - "acc,exam_id__2012-06": 0.45, - "acc,exam_id__2013-12": 0.5, - "acc,exam_id__2010-01": 0.4470588235294118, - "acc,exam_id__2012-07": 0.4, - "acc,exam_id__2016-19": 0.5256410256410257, - "acc,exam_id__2016-21": 0.425, - "acc,exam_id__2017-23": 0.425, - "acc,exam_id__2017-24": 0.375, - "acc,exam_id__2016-20a": 0.375, - "acc,exam_id__2015-16": 0.45, - "acc,exam_id__2013-11": 0.4, - "acc,exam_id__2017-22": 0.5375, - "acc,exam_id__2014-14": 0.5875, - "acc,exam_id__2014-13": 0.4125, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.711623178583531, - "acc,all": 0.7743830787309048 - }, - "tweetsentbr": { - "f1_macro,all": 0.4944286431942644, - "acc,all": 0.6960199004975124, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9248076923076923, + "acc,all": 0.9248366013071896, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.8345948997950121, + "mse,all": 0.349142156862745, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5312934631432545, + "acc,exam_id__USP_2018": 0.4444444444444444, + "acc,exam_id__USP_2020": 0.5357142857142857, + "acc,exam_id__UNICAMP_2020": 0.5272727272727272, + "acc,exam_id__UNICAMP_2019": 0.56, + "acc,exam_id__UNICAMP_2022": 0.5897435897435898, + "acc,exam_id__USP_2021": 0.5384615384615384, + "acc,exam_id__USP_2019": 0.45, + "acc,exam_id__USP_2022": 0.6122448979591837, + "acc,exam_id__UNICAMP_2024": 0.5111111111111111, + "acc,exam_id__USP_2023": 0.5681818181818182, + "acc,exam_id__UNICAMP_2018": 0.5, + "acc,exam_id__UNICAMP_2021_1": 0.43478260869565216, + "acc,exam_id__UNICAMP_2023": 0.5348837209302325, + "acc,exam_id__USP_2024": 0.6829268292682927, + "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6452064380685795, + "acc,exam_id__2016_2": 0.6585365853658537, + "acc,exam_id__2016": 0.6033057851239669, + "acc,exam_id__2014": 0.6146788990825688, + "acc,exam_id__2011": 0.717948717948718, + "acc,exam_id__2015": 0.6218487394957983, + "acc,exam_id__2012": 0.6379310344827587, + "acc,exam_id__2013": 0.6388888888888888, + "acc,exam_id__2010": 0.6324786324786325, + "acc,exam_id__2017": 0.6724137931034483, + "acc,exam_id__2009": 0.6173913043478261, + "acc,exam_id__2023": 0.6666666666666666, + "acc,exam_id__2022": 0.6541353383458647 + }, + "faquad_nli": { + "f1_macro,all": 0.7866754184443715, + "acc,all": 0.8569230769230769, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7732668320172721, + "acc,all": 0.7821428571428571 + }, + "oab_exams": { + "acc,all": 0.4496583143507973, + "acc,exam_id__2013-10": 0.45, + "acc,exam_id__2012-09": 0.4025974025974026, + "acc,exam_id__2010-02": 0.45, + "acc,exam_id__2015-18": 0.4875, + "acc,exam_id__2018-25": 0.475, + "acc,exam_id__2015-17": 0.5641025641025641, + "acc,exam_id__2011-03": 0.35353535353535354, + "acc,exam_id__2016-20": 0.4625, + "acc,exam_id__2011-04": 0.3375, + "acc,exam_id__2012-06a": 0.5, + "acc,exam_id__2012-08": 0.4125, + "acc,exam_id__2011-05": 0.4375, + "acc,exam_id__2014-15": 0.5256410256410257, + "acc,exam_id__2012-06": 0.45, + "acc,exam_id__2013-12": 0.5, + "acc,exam_id__2010-01": 0.4470588235294118, + "acc,exam_id__2012-07": 0.4, + "acc,exam_id__2016-19": 0.5256410256410257, + "acc,exam_id__2016-21": 0.425, + "acc,exam_id__2017-23": 0.425, + "acc,exam_id__2017-24": 0.375, + 
"acc,exam_id__2016-20a": 0.375, + "acc,exam_id__2015-16": 0.45, + "acc,exam_id__2013-11": 0.4, + "acc,exam_id__2017-22": 0.5375, + "acc,exam_id__2014-14": 0.5875, + "acc,exam_id__2014-13": 0.4125, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.711623178583531, + "acc,all": 0.7743830787309048 + }, + "tweetsentbr": { + "f1_macro,all": 0.6592381909256858, + "acc,all": 0.6960199004975124, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "62c30dc92aa9ba9070ff0f726440029aaf5bed34", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 14483505152, - "model_num_parameters": 7241748480, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1584.7455065359477, - "min_seq_length": 1561, - "max_seq_length": 1651, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1824.7455065359477, - "min_seq_length": 1801, - "max_seq_length": 1891, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1782.9262865090404, - "min_seq_length": 1406, - "max_seq_length": 2583, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1683.039188243527, - "min_seq_length": 1417, - "max_seq_length": 2681, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1825.9876923076922, - "min_seq_length": 1770, - "max_seq_length": 1946, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "62c30dc92aa9ba9070ff0f726440029aaf5bed34", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 14483505152, + "model_num_parameters": 7241748480, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1676.3878571428572, - "min_seq_length": 1653, - "max_seq_length": 1927, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1428.764464692483, - "min_seq_length": 1162, - "max_seq_length": 1931, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1584.7455065359477, + "min_seq_length": 1561, + "max_seq_length": 1651, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1824.7455065359477, + "min_seq_length": 1801, + "max_seq_length": 1891, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1782.9262865090404, + "min_seq_length": 1406, + "max_seq_length": 2583, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1683.039188243527, + "min_seq_length": 1417, + 
"max_seq_length": 2681, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1825.9876923076922, + "min_seq_length": 1770, + "max_seq_length": 1946, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1676.3878571428572, + "min_seq_length": 1653, + "max_seq_length": 1927, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1428.764464692483, + "min_seq_length": 1162, + "max_seq_length": 1931, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2177.3360752056406, + "min_seq_length": 2142, + "max_seq_length": 2216, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1923.2492537313433, + "min_seq_length": 1902, + "max_seq_length": 2018, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2177.3360752056406, - "min_seq_length": 2142, - "max_seq_length": 2216, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=cognitivecomputations/openchat-3.5-0106-laser,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1923.2492537313433, - "min_seq_length": 1902, - "max_seq_length": 2018, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=cognitivecomputations/openchat-3.5-0106-laser,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, 
- null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/cognitivecomputations/openchat-3.5-0106-laser/results_2024-08-08T01-36-00.321142.json b/cognitivecomputations/openchat-3.5-0106-laser/results_2024-08-08T01-36-00.321142.json index 6d2ae9ebbddefbeface771e2864e55cfec26509b..278bf4d6315b3ddbf40680e628c2734f8be16cc1 100644 --- a/cognitivecomputations/openchat-3.5-0106-laser/results_2024-08-08T01-36-00.321142.json +++ b/cognitivecomputations/openchat-3.5-0106-laser/results_2024-08-08T01-36-00.321142.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6835060977671972, - "all_grouped_npm": 0.5278549972279424, + "all_grouped_average": 0.7018182697373552, + "all_grouped_npm": 0.5551052531359155, "all_grouped": { "enem_challenge": 0.6452064380685795, "bluex": 0.5312934631432545, @@ -45,7 +45,7 @@ "faquad_nli": 0.7866754184443715, "hatebr_offensive": 0.7732668320172721, "portuguese_hate_speech": 0.711623178583531, - "tweetsentbr": 0.4944286431942644 + "tweetsentbr": 0.6592381909256858 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6452064380685795, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7866754184443715, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7732668320172721, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.711623178583531, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4944286431942644 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6592381909256858 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6452064380685795, @@ -150,9 +150,9 @@ "main_score": 0.711623178583531 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4944286431942644, + "f1_macro,all": 0.6592381909256858, "acc,all": 0.6960199004975124, - "main_score": 0.4944286431942644 + "main_score": 0.6592381909256858 } }, "config_tasks": { diff --git a/dominguesm/mambarim-110m/raw_2024-04-18T22-38-53.882980/results.json b/dominguesm/mambarim-110m/raw_2024-04-18T22-38-53.882980/results.json index 9c393a0b85ef01a127879a6165339009fa8d7bab..45e45e7354cad00e7306cc5806faeda3521a2044 100644 --- a/dominguesm/mambarim-110m/raw_2024-04-18T22-38-53.882980/results.json +++ b/dominguesm/mambarim-110m/raw_2024-04-18T22-38-53.882980/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.16089990032749538, - "acc,all": 0.23080065359477125, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.018889657528727573, - "mse,all": 3.1399800604461343, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.10570236439499305, - "acc,exam_id__USP_2023": 0.06818181818181818, - "acc,exam_id__UNICAMP_2021_1": 0.1956521739130435, - "acc,exam_id__UNICAMP_2024": 0.08888888888888889, - "acc,exam_id__USP_2020": 0.05357142857142857, - "acc,exam_id__UNICAMP_2018": 0.037037037037037035, - "acc,exam_id__USP_2019": 0.125, - "acc,exam_id__USP_2021": 0.09615384615384616, - "acc,exam_id__USP_2024": 0.04878048780487805, - "acc,exam_id__UNICAMP_2019": 0.18, - "acc,exam_id__UNICAMP_2020": 0.18181818181818182, - "acc,exam_id__USP_2022": 0.08163265306122448, - "acc,exam_id__UNICAMP_2023": 0.18604651162790697, - "acc,exam_id__UNICAMP_2022": 0.10256410256410256, - "acc,exam_id__USP_2018": 0.09259259259259259, - "acc,exam_id__UNICAMP_2021_2": 0.058823529411764705, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.18404478656403078, - 
"acc,exam_id__2017": 0.22413793103448276, - "acc,exam_id__2014": 0.1743119266055046, - "acc,exam_id__2016": 0.1652892561983471, - "acc,exam_id__2013": 0.1574074074074074, - "acc,exam_id__2022": 0.22556390977443608, - "acc,exam_id__2023": 0.2222222222222222, - "acc,exam_id__2015": 0.12605042016806722, - "acc,exam_id__2010": 0.18803418803418803, - "acc,exam_id__2016_2": 0.17886178861788618, - "acc,exam_id__2009": 0.16521739130434782, - "acc,exam_id__2012": 0.1810344827586207, - "acc,exam_id__2011": 0.18803418803418803 - }, - "faquad_nli": { - "f1_macro,all": 0.09286687869150385, - "acc,all": 0.1276923076923077, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.15746753246753245, - "acc,all": 0.20785714285714285 - }, - "oab_exams": { - "acc,all": 0.21867881548974943, - "acc,exam_id__2018-25": 0.2375, - "acc,exam_id__2015-16": 0.225, - "acc,exam_id__2012-06a": 0.275, - "acc,exam_id__2016-21": 0.225, - "acc,exam_id__2013-11": 0.1875, - "acc,exam_id__2012-07": 0.1375, - "acc,exam_id__2016-20a": 0.3125, - "acc,exam_id__2015-17": 0.1794871794871795, - "acc,exam_id__2015-18": 0.25, - "acc,exam_id__2014-13": 0.1875, - "acc,exam_id__2011-05": 0.2625, - "acc,exam_id__2017-22": 0.225, - "acc,exam_id__2012-09": 0.19480519480519481, - "acc,exam_id__2013-12": 0.2, - "acc,exam_id__2016-20": 0.15, - "acc,exam_id__2010-02": 0.24, - "acc,exam_id__2014-14": 0.225, - "acc,exam_id__2012-06": 0.225, - "acc,exam_id__2017-23": 0.1875, - "acc,exam_id__2013-10": 0.2625, - "acc,exam_id__2017-24": 0.2, - "acc,exam_id__2010-01": 0.3058823529411765, - "acc,exam_id__2011-03": 0.20202020202020202, - "acc,exam_id__2011-04": 0.225, - "acc,exam_id__2014-15": 0.1794871794871795, - "acc,exam_id__2016-19": 0.1794871794871795, - "acc,exam_id__2012-08": 0.2125, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.17768450507170339, - "acc,all": 0.2984723854289072 - }, - "tweetsentbr": { - "f1_macro,all": 0.1578881090325337, - "acc,all": 0.16766169154228855, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.24134985049124305, + "acc,all": 0.23080065359477125, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.018889657528727573, + "mse,all": 3.1399800604461343, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.10570236439499305, + "acc,exam_id__USP_2023": 0.06818181818181818, + "acc,exam_id__UNICAMP_2021_1": 0.1956521739130435, + "acc,exam_id__UNICAMP_2024": 0.08888888888888889, + "acc,exam_id__USP_2020": 0.05357142857142857, + "acc,exam_id__UNICAMP_2018": 0.037037037037037035, + "acc,exam_id__USP_2019": 0.125, + "acc,exam_id__USP_2021": 0.09615384615384616, + "acc,exam_id__USP_2024": 0.04878048780487805, + "acc,exam_id__UNICAMP_2019": 0.18, + "acc,exam_id__UNICAMP_2020": 0.18181818181818182, + "acc,exam_id__USP_2022": 0.08163265306122448, + "acc,exam_id__UNICAMP_2023": 0.18604651162790697, + "acc,exam_id__UNICAMP_2022": 0.10256410256410256, + "acc,exam_id__USP_2018": 0.09259259259259259, + "acc,exam_id__UNICAMP_2021_2": 0.058823529411764705, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.18404478656403078, + "acc,exam_id__2017": 0.22413793103448276, + "acc,exam_id__2014": 0.1743119266055046, + "acc,exam_id__2016": 0.1652892561983471, + "acc,exam_id__2013": 0.1574074074074074, + "acc,exam_id__2022": 0.22556390977443608, + "acc,exam_id__2023": 0.2222222222222222, + "acc,exam_id__2015": 0.12605042016806722, + "acc,exam_id__2010": 0.18803418803418803, + "acc,exam_id__2016_2": 0.17886178861788618, + "acc,exam_id__2009": 0.16521739130434782, + "acc,exam_id__2012": 0.1810344827586207, + "acc,exam_id__2011": 0.18803418803418803 + }, + "faquad_nli": { + "f1_macro,all": 0.1393003180372558, + "acc,all": 0.1276923076923077, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.2362012987012987, + "acc,all": 0.20785714285714285 + }, + "oab_exams": { + "acc,all": 0.21867881548974943, + "acc,exam_id__2018-25": 0.2375, + "acc,exam_id__2015-16": 0.225, + "acc,exam_id__2012-06a": 0.275, + "acc,exam_id__2016-21": 0.225, + "acc,exam_id__2013-11": 0.1875, + "acc,exam_id__2012-07": 0.1375, + "acc,exam_id__2016-20a": 0.3125, + "acc,exam_id__2015-17": 0.1794871794871795, + "acc,exam_id__2015-18": 0.25, + "acc,exam_id__2014-13": 0.1875, + "acc,exam_id__2011-05": 0.2625, + "acc,exam_id__2017-22": 0.225, + "acc,exam_id__2012-09": 0.19480519480519481, + "acc,exam_id__2013-12": 0.2, + "acc,exam_id__2016-20": 0.15, + "acc,exam_id__2010-02": 0.24, + "acc,exam_id__2014-14": 0.225, + "acc,exam_id__2012-06": 0.225, + "acc,exam_id__2017-23": 0.1875, + "acc,exam_id__2013-10": 0.2625, + "acc,exam_id__2017-24": 0.2, + 
"acc,exam_id__2010-01": 0.3058823529411765, + "acc,exam_id__2011-03": 0.20202020202020202, + "acc,exam_id__2011-04": 0.225, + "acc,exam_id__2014-15": 0.1794871794871795, + "acc,exam_id__2016-19": 0.1794871794871795, + "acc,exam_id__2012-08": 0.2125, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.26652675760755506, + "acc,all": 0.2984723854289072 + }, + "tweetsentbr": { + "f1_macro,all": 0.21051747871004492, + "acc,all": 0.16766169154228855, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 2, - "non_truncated": 14148, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "885bd356857bb5a87bc877149db4919bbe8eed9e", - "model_dtype": "torch.float16", - "model_memory_footprint": 140262912, - "model_num_parameters": 69836544, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 64, - "max_length": 2048, - "max_ctx_length": 2016, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 925.4232026143791, - "min_seq_length": 910, - "max_seq_length": 964, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 966.4232026143791, - "min_seq_length": 951, - "max_seq_length": 1005, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1171.817802503477, - "min_seq_length": 905, - "max_seq_length": 1802, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 4, - "mean_seq_length": 1008.4177746675997, - 
"min_seq_length": 830, - "max_seq_length": 2485, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972008397480754 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 969.1338461538462, - "min_seq_length": 937, - "max_seq_length": 1035, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 2, + "non_truncated": 14148, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "885bd356857bb5a87bc877149db4919bbe8eed9e", + "model_dtype": "torch.float16", + "model_memory_footprint": 140262912, + "model_num_parameters": 69836544, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 64, + "max_length": 2048, + "max_ctx_length": 2016, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 868.4407142857143, - "min_seq_length": 853, - "max_seq_length": 1062, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 833.024145785877, - "min_seq_length": 660, - "max_seq_length": 1109, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 925.4232026143791, + "min_seq_length": 910, + "max_seq_length": 964, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 966.4232026143791, + "min_seq_length": 951, + "max_seq_length": 1005, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1171.817802503477, + "min_seq_length": 905, + "max_seq_length": 1802, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 4, + "mean_seq_length": 1008.4177746675997, + "min_seq_length": 830, + "max_seq_length": 2485, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 
3.0, + "mean_effective_fewshot_size": 2.9972008397480754 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 969.1338461538462, + "min_seq_length": 937, + "max_seq_length": 1035, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 868.4407142857143, + "min_seq_length": 853, + "max_seq_length": 1062, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 833.024145785877, + "min_seq_length": 660, + "max_seq_length": 1109, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1220.021151586369, + "min_seq_length": 1193, + "max_seq_length": 1256, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1356.4194029850746, + "min_seq_length": 1339, + "max_seq_length": 1413, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1220.021151586369, - "min_seq_length": 1193, - "max_seq_length": 1256, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=dominguesm/mambarim-110m,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1356.4194029850746, - "min_seq_length": 1339, - "max_seq_length": 1413, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=dominguesm/mambarim-110m,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "0e4d6ae" + "git_hash": "0e4d6ae" } \ No newline at end 
of file diff --git a/dominguesm/mambarim-110m/results_2024-04-18T22-38-53.882980.json b/dominguesm/mambarim-110m/results_2024-04-18T22-38-53.882980.json index 6e6ae9338779503f4c9ace6a00ef7c31a35d55fa..cedbf029bf36dfc54d9dc18523f631acc38ce552 100644 --- a/dominguesm/mambarim-110m/results_2024-04-18T22-38-53.882980.json +++ b/dominguesm/mambarim-110m/results_2024-04-18T22-38-53.882980.json @@ -34,29 +34,29 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.14156917217425216, - "all_grouped_npm": -0.33989122247229714, + "all_grouped_average": 0.18013459194721093, + "all_grouped_npm": -0.26738421226350767, "all_grouped": { "enem_challenge": 0.18404478656403078, "bluex": 0.10570236439499305, "oab_exams": 0.21867881548974943, - "assin2_rte": 0.16089990032749538, + "assin2_rte": 0.24134985049124305, "assin2_sts": 0.018889657528727573, - "faquad_nli": 0.09286687869150385, - "hatebr_offensive": 0.15746753246753245, - "portuguese_hate_speech": 0.17768450507170339, - "tweetsentbr": 0.1578881090325337 + "faquad_nli": 0.1393003180372558, + "hatebr_offensive": 0.2362012987012987, + "portuguese_hate_speech": 0.26652675760755506, + "tweetsentbr": 0.21051747871004492 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.18404478656403078, "harness|bluex|bluex|None|3": 0.10570236439499305, "harness|oab_exams|oab_exams|None|3": 0.21867881548974943, - "harness|assin2_rte|assin2_rte|None|15": 0.16089990032749538, + "harness|assin2_rte|assin2_rte|None|15": 0.24134985049124305, "harness|assin2_sts|assin2_sts|None|15": 0.018889657528727573, - "harness|faquad_nli|faquad_nli|None|15": 0.09286687869150385, - "harness|hatebr_offensive|hatebr_offensive|None|25": 0.15746753246753245, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.17768450507170339, - "harness|tweetsentbr|tweetsentbr|None|25": 0.1578881090325337 + "harness|faquad_nli|faquad_nli|None|15": 0.1393003180372558, + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.2362012987012987, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.26652675760755506, + "harness|tweetsentbr|tweetsentbr|None|25": 0.21051747871004492 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.18404478656403078, @@ -125,9 +125,9 @@ "main_score": 0.21867881548974943 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.16089990032749538, + "f1_macro,all": 0.24134985049124305, "acc,all": 0.23080065359477125, - "main_score": 0.16089990032749538 + "main_score": 0.24134985049124305 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.018889657528727573, @@ -135,24 +135,24 @@ "main_score": 0.018889657528727573 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.09286687869150385, + "f1_macro,all": 0.1393003180372558, "acc,all": 0.1276923076923077, - "main_score": 0.09286687869150385 + "main_score": 0.1393003180372558 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.15746753246753245, + "f1_macro,all": 0.2362012987012987, "acc,all": 0.20785714285714285, - "main_score": 0.15746753246753245 + "main_score": 0.2362012987012987 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.17768450507170339, + "f1_macro,all": 0.26652675760755506, "acc,all": 0.2984723854289072, - "main_score": 0.17768450507170339 + "main_score": 0.26652675760755506 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.1578881090325337, + "f1_macro,all": 0.21051747871004492, "acc,all": 0.16766169154228855, - "main_score": 
0.1578881090325337 + "main_score": 0.21051747871004492 } }, "config_tasks": { diff --git a/dzakwan/dzakwan-MoE-4x7b-Beta/raw_2024-06-13T02-34-28.888859/results.json b/dzakwan/dzakwan-MoE-4x7b-Beta/raw_2024-06-13T02-34-28.888859/results.json index 5837d0d3e9b0e9ef39a73fc1910b2e77abb21287..20744f2dfbc7df27dfaeb9b16cc87050bf879509 100644 --- a/dzakwan/dzakwan-MoE-4x7b-Beta/raw_2024-06-13T02-34-28.888859/results.json +++ b/dzakwan/dzakwan-MoE-4x7b-Beta/raw_2024-06-13T02-34-28.888859/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.41699906286995536, - "acc,all": 0.6519607843137255, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.5728700306043661, - "mse,all": 0.7793033088235294, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5201668984700973, - "acc,exam_id__UNICAMP_2024": 0.4444444444444444, - "acc,exam_id__USP_2018": 0.46296296296296297, - "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, - "acc,exam_id__UNICAMP_2018": 0.46296296296296297, - "acc,exam_id__UNICAMP_2020": 0.6181818181818182, - "acc,exam_id__UNICAMP_2022": 0.5641025641025641, - "acc,exam_id__USP_2021": 0.46153846153846156, - "acc,exam_id__USP_2023": 0.5681818181818182, - "acc,exam_id__UNICAMP_2023": 0.5813953488372093, - "acc,exam_id__USP_2020": 0.4642857142857143, - "acc,exam_id__USP_2019": 0.5, - "acc,exam_id__UNICAMP_2021_2": 0.47058823529411764, - "acc,exam_id__USP_2024": 0.6829268292682927, - "acc,exam_id__USP_2022": 0.5714285714285714, - "acc,exam_id__UNICAMP_2019": 0.52, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.58222533240028, - "acc,exam_id__2013": 0.5925925925925926, - "acc,exam_id__2023": 0.6148148148148148, - "acc,exam_id__2015": 0.5882352941176471, - "acc,exam_id__2014": 0.5871559633027523, - "acc,exam_id__2016": 0.5454545454545454, - "acc,exam_id__2017": 0.6206896551724138, - "acc,exam_id__2010": 0.5641025641025641, - "acc,exam_id__2012": 0.5344827586206896, - "acc,exam_id__2011": 0.6324786324786325, - "acc,exam_id__2022": 0.5714285714285714, - "acc,exam_id__2016_2": 0.5609756097560976, - "acc,exam_id__2009": 0.5739130434782609 - }, - "faquad_nli": { - "f1_macro,all": 0.5516489252623926, - "acc,all": 0.7723076923076924, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.6730588787239034, - "acc,all": 0.6814285714285714 - }, - "oab_exams": { - "acc,all": 0.39726651480637815, - "acc,exam_id__2017-23": 0.325, - "acc,exam_id__2016-20a": 0.35, - "acc,exam_id__2010-02": 0.42, - "acc,exam_id__2011-04": 0.4, - "acc,exam_id__2013-12": 0.45, - "acc,exam_id__2014-14": 0.4875, - "acc,exam_id__2016-21": 0.3625, - "acc,exam_id__2012-06": 0.3875, - "acc,exam_id__2011-05": 0.475, - "acc,exam_id__2015-18": 0.3875, - "acc,exam_id__2012-07": 0.3875, - "acc,exam_id__2014-13": 0.3625, - "acc,exam_id__2010-01": 0.3058823529411765, - "acc,exam_id__2016-19": 0.47435897435897434, - "acc,exam_id__2018-25": 0.425, - "acc,exam_id__2011-03": 0.3838383838383838, - "acc,exam_id__2013-10": 0.325, - "acc,exam_id__2014-15": 0.47435897435897434, - "acc,exam_id__2012-06a": 0.375, - "acc,exam_id__2012-08": 0.375, - "acc,exam_id__2013-11": 0.4, - "acc,exam_id__2015-16": 0.375, - "acc,exam_id__2012-09": 0.35064935064935066, - "acc,exam_id__2016-20": 0.3375, - "acc,exam_id__2017-22": 0.4875, - "acc,exam_id__2015-17": 0.47435897435897434, - "acc,exam_id__2017-24": 0.375, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 
0.575891283472511, - "acc,all": 0.6145710928319624 - }, - "tweetsentbr": { - "f1_macro,all": 0.5263769536717947, - "acc,all": 0.5562189054726369, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.6254985943049329, + "acc,all": 0.6519607843137255, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.5728700306043661, + "mse,all": 0.7793033088235294, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5201668984700973, + "acc,exam_id__UNICAMP_2024": 0.4444444444444444, + "acc,exam_id__USP_2018": 0.46296296296296297, + "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, + "acc,exam_id__UNICAMP_2018": 0.46296296296296297, + "acc,exam_id__UNICAMP_2020": 0.6181818181818182, + "acc,exam_id__UNICAMP_2022": 0.5641025641025641, + "acc,exam_id__USP_2021": 0.46153846153846156, + "acc,exam_id__USP_2023": 0.5681818181818182, + "acc,exam_id__UNICAMP_2023": 0.5813953488372093, + "acc,exam_id__USP_2020": 0.4642857142857143, + "acc,exam_id__USP_2019": 0.5, + "acc,exam_id__UNICAMP_2021_2": 0.47058823529411764, + "acc,exam_id__USP_2024": 0.6829268292682927, + "acc,exam_id__USP_2022": 0.5714285714285714, + "acc,exam_id__UNICAMP_2019": 0.52, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.58222533240028, + "acc,exam_id__2013": 0.5925925925925926, + "acc,exam_id__2023": 0.6148148148148148, + "acc,exam_id__2015": 0.5882352941176471, + "acc,exam_id__2014": 0.5871559633027523, + "acc,exam_id__2016": 0.5454545454545454, + "acc,exam_id__2017": 0.6206896551724138, + "acc,exam_id__2010": 0.5641025641025641, + "acc,exam_id__2012": 0.5344827586206896, + "acc,exam_id__2011": 0.6324786324786325, + "acc,exam_id__2022": 0.5714285714285714, + "acc,exam_id__2016_2": 0.5609756097560976, + "acc,exam_id__2009": 0.5739130434782609 + }, + "faquad_nli": { + "f1_macro,all": 0.5516489252623926, + "acc,all": 0.7723076923076924, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.6730588787239034, + "acc,all": 0.6814285714285714 + }, + "oab_exams": { + "acc,all": 0.39726651480637815, + "acc,exam_id__2017-23": 0.325, + "acc,exam_id__2016-20a": 0.35, + "acc,exam_id__2010-02": 0.42, + "acc,exam_id__2011-04": 0.4, + 
"acc,exam_id__2013-12": 0.45, + "acc,exam_id__2014-14": 0.4875, + "acc,exam_id__2016-21": 0.3625, + "acc,exam_id__2012-06": 0.3875, + "acc,exam_id__2011-05": 0.475, + "acc,exam_id__2015-18": 0.3875, + "acc,exam_id__2012-07": 0.3875, + "acc,exam_id__2014-13": 0.3625, + "acc,exam_id__2010-01": 0.3058823529411765, + "acc,exam_id__2016-19": 0.47435897435897434, + "acc,exam_id__2018-25": 0.425, + "acc,exam_id__2011-03": 0.3838383838383838, + "acc,exam_id__2013-10": 0.325, + "acc,exam_id__2014-15": 0.47435897435897434, + "acc,exam_id__2012-06a": 0.375, + "acc,exam_id__2012-08": 0.375, + "acc,exam_id__2013-11": 0.4, + "acc,exam_id__2015-16": 0.375, + "acc,exam_id__2012-09": 0.35064935064935066, + "acc,exam_id__2016-20": 0.3375, + "acc,exam_id__2017-22": 0.4875, + "acc,exam_id__2015-17": 0.47435897435897434, + "acc,exam_id__2017-24": 0.375, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.575891283472511, + "acc,all": 0.6145710928319624 + }, + "tweetsentbr": { + "f1_macro,all": 0.5263769536717947, + "acc,all": 0.5562189054726369, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "e89f82f2afa1961335de5a6d6d05bd850d1d61d9", - "model_dtype": "torch.float16", - "model_memory_footprint": 48844259328, - "model_num_parameters": 24153690112, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1531.7455065359477, - "min_seq_length": 1508, - "max_seq_length": 1598, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1740.7455065359477, - "min_seq_length": 1717, - "max_seq_length": 1807, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1761.9262865090404, - "min_seq_length": 1385, - "max_seq_length": 2562, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1662.039188243527, - "min_seq_length": 1396, - "max_seq_length": 2660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1756.9876923076922, - "min_seq_length": 1701, - "max_seq_length": 1877, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "e89f82f2afa1961335de5a6d6d05bd850d1d61d9", + "model_dtype": "torch.float16", + "model_memory_footprint": 48844259328, + "model_num_parameters": 24153690112, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1567.3878571428572, - "min_seq_length": 1544, - "max_seq_length": 1818, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1407.764464692483, - "min_seq_length": 1141, - "max_seq_length": 1910, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1531.7455065359477, + "min_seq_length": 1508, + "max_seq_length": 1598, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1740.7455065359477, + "min_seq_length": 1717, + "max_seq_length": 1807, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1761.9262865090404, + "min_seq_length": 1385, + "max_seq_length": 2562, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1662.039188243527, + "min_seq_length": 1396, + 
"max_seq_length": 2660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1756.9876923076922, + "min_seq_length": 1701, + "max_seq_length": 1877, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1567.3878571428572, + "min_seq_length": 1544, + "max_seq_length": 1818, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1407.764464692483, + "min_seq_length": 1141, + "max_seq_length": 1910, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2068.3360752056406, + "min_seq_length": 2033, + "max_seq_length": 2107, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1814.2492537313433, + "min_seq_length": 1793, + "max_seq_length": 1909, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2068.3360752056406, - "min_seq_length": 2033, - "max_seq_length": 2107, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=dzakwan/dzakwan-MoE-4x7b-Beta,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1814.2492537313433, - "min_seq_length": 1793, - "max_seq_length": 1909, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=dzakwan/dzakwan-MoE-4x7b-Beta,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - 
], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "f2a0116" + "git_hash": "f2a0116" } \ No newline at end of file diff --git a/dzakwan/dzakwan-MoE-4x7b-Beta/results_2024-06-13T02-34-28.888859.json b/dzakwan/dzakwan-MoE-4x7b-Beta/results_2024-06-13T02-34-28.888859.json index 4fc98e84449a8e58a499279341bab0772b6965e3..e94ac9d5f9f0d9884fa0727e13a3bd946adb80ca 100644 --- a/dzakwan/dzakwan-MoE-4x7b-Beta/results_2024-06-13T02-34-28.888859.json +++ b/dzakwan/dzakwan-MoE-4x7b-Beta/results_2024-06-13T02-34-28.888859.json @@ -34,13 +34,13 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.5351670978090755, - "all_grouped_npm": 0.2738871366885911, + "all_grouped_average": 0.5583337124129618, + "all_grouped_npm": 0.32022036589636393, "all_grouped": { "enem_challenge": 0.58222533240028, "bluex": 0.5201668984700973, "oab_exams": 0.39726651480637815, - "assin2_rte": 0.41699906286995536, + "assin2_rte": 0.6254985943049329, "assin2_sts": 0.5728700306043661, "faquad_nli": 0.5516489252623926, "hatebr_offensive": 0.6730588787239034, @@ -51,7 +51,7 @@ "harness|enem_challenge|enem_challenge|None|3": 0.58222533240028, "harness|bluex|bluex|None|3": 0.5201668984700973, "harness|oab_exams|oab_exams|None|3": 0.39726651480637815, - "harness|assin2_rte|assin2_rte|None|15": 0.41699906286995536, + "harness|assin2_rte|assin2_rte|None|15": 0.6254985943049329, "harness|assin2_sts|assin2_sts|None|15": 0.5728700306043661, "harness|faquad_nli|faquad_nli|None|15": 0.5516489252623926, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.6730588787239034, @@ -125,9 +125,9 @@ "main_score": 0.39726651480637815 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.41699906286995536, + "f1_macro,all": 0.6254985943049329, "acc,all": 0.6519607843137255, - "main_score": 0.41699906286995536 + "main_score": 0.6254985943049329 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.5728700306043661, diff --git a/eduagarcia/gemma-7b-it_no_chat_template/raw_2024-02-23T22-19-59.816101/results.json b/eduagarcia/gemma-7b-it_no_chat_template/raw_2024-02-23T22-19-59.816101/results.json index 54386bb2d51ed564ccc74e800f947989a246570b..51396c0212084706d817d220003f05bcb889f80b 100644 --- a/eduagarcia/gemma-7b-it_no_chat_template/raw_2024-02-23T22-19-59.816101/results.json +++ b/eduagarcia/gemma-7b-it_no_chat_template/raw_2024-02-23T22-19-59.816101/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.8471090065645221, - "acc,all": 0.8476307189542484, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.6824898498454515, - "mse,all": 0.5544934640522875, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.36022253129346316, - "acc,exam_id__UNICAMP_2019": 0.32, - "acc,exam_id__USP_2022": 0.3673469387755102, - "acc,exam_id__USP_2019": 0.25, - "acc,exam_id__USP_2023": 0.36363636363636365, - "acc,exam_id__USP_2024": 0.43902439024390244, - "acc,exam_id__UNICAMP_2022": 0.46153846153846156, - "acc,exam_id__UNICAMP_2023": 0.46511627906976744, - "acc,exam_id__USP_2020": 0.42857142857142855, - "acc,exam_id__USP_2018": 0.25925925925925924, - "acc,exam_id__USP_2021": 0.38461538461538464, - "acc,exam_id__UNICAMP_2018": 0.24074074074074073, - "acc,exam_id__UNICAMP_2024": 0.4444444444444444, - "acc,exam_id__UNICAMP_2021_2": 0.29411764705882354, - "acc,exam_id__UNICAMP_2021_1": 0.34782608695652173, - "acc,exam_id__UNICAMP_2020": 0.38181818181818183, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.470258922323303, - 
"acc,exam_id__2023": 0.5407407407407407, - "acc,exam_id__2009": 0.45217391304347826, - "acc,exam_id__2013": 0.4166666666666667, - "acc,exam_id__2012": 0.4482758620689655, - "acc,exam_id__2016": 0.4628099173553719, - "acc,exam_id__2017": 0.4224137931034483, - "acc,exam_id__2014": 0.44954128440366975, - "acc,exam_id__2011": 0.5384615384615384, - "acc,exam_id__2010": 0.48717948717948717, - "acc,exam_id__2015": 0.40336134453781514, - "acc,exam_id__2016_2": 0.4796747967479675, - "acc,exam_id__2022": 0.518796992481203 - }, - "faquad_nli": { - "f1_macro,all": 0.38090214105688, - "acc,all": 0.5676923076923077, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8272202378666258, - "acc,all": 0.8278571428571428 - }, - "oab_exams": { - "acc,all": 0.35170842824601367, - "acc,exam_id__2016-20a": 0.35, - "acc,exam_id__2011-03": 0.3434343434343434, - "acc,exam_id__2012-07": 0.375, - "acc,exam_id__2015-17": 0.38461538461538464, - "acc,exam_id__2014-13": 0.3375, - "acc,exam_id__2012-08": 0.3375, - "acc,exam_id__2013-10": 0.375, - "acc,exam_id__2014-15": 0.41025641025641024, - "acc,exam_id__2016-20": 0.3375, - "acc,exam_id__2017-24": 0.2375, - "acc,exam_id__2016-19": 0.4230769230769231, - "acc,exam_id__2010-02": 0.35, - "acc,exam_id__2017-22": 0.4375, - "acc,exam_id__2012-06a": 0.425, - "acc,exam_id__2018-25": 0.35, - "acc,exam_id__2014-14": 0.35, - "acc,exam_id__2010-01": 0.3176470588235294, - "acc,exam_id__2015-16": 0.3, - "acc,exam_id__2011-05": 0.2875, - "acc,exam_id__2013-11": 0.35, - "acc,exam_id__2013-12": 0.35, - "acc,exam_id__2012-09": 0.3246753246753247, - "acc,exam_id__2012-06": 0.375, - "acc,exam_id__2017-23": 0.3375, - "acc,exam_id__2011-04": 0.375, - "acc,exam_id__2016-21": 0.325, - "acc,exam_id__2015-18": 0.3375, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.4432352615659635, - "acc,all": 0.4512338425381904 - }, - "tweetsentbr": { - "f1_macro,all": 0.601954964588851, - "acc,all": 0.6248756218905472, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.8471090065645221, + "acc,all": 0.8476307189542484, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.6824898498454515, + "mse,all": 0.5544934640522875, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.36022253129346316, + "acc,exam_id__UNICAMP_2019": 0.32, + "acc,exam_id__USP_2022": 0.3673469387755102, + "acc,exam_id__USP_2019": 0.25, + "acc,exam_id__USP_2023": 0.36363636363636365, + "acc,exam_id__USP_2024": 0.43902439024390244, + "acc,exam_id__UNICAMP_2022": 0.46153846153846156, + "acc,exam_id__UNICAMP_2023": 0.46511627906976744, + "acc,exam_id__USP_2020": 0.42857142857142855, + "acc,exam_id__USP_2018": 0.25925925925925924, + "acc,exam_id__USP_2021": 0.38461538461538464, + "acc,exam_id__UNICAMP_2018": 0.24074074074074073, + "acc,exam_id__UNICAMP_2024": 0.4444444444444444, + "acc,exam_id__UNICAMP_2021_2": 0.29411764705882354, + "acc,exam_id__UNICAMP_2021_1": 0.34782608695652173, + "acc,exam_id__UNICAMP_2020": 0.38181818181818183, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.470258922323303, + "acc,exam_id__2023": 0.5407407407407407, + "acc,exam_id__2009": 0.45217391304347826, + "acc,exam_id__2013": 0.4166666666666667, + "acc,exam_id__2012": 0.4482758620689655, + "acc,exam_id__2016": 0.4628099173553719, + "acc,exam_id__2017": 0.4224137931034483, + "acc,exam_id__2014": 0.44954128440366975, + "acc,exam_id__2011": 0.5384615384615384, + "acc,exam_id__2010": 0.48717948717948717, + "acc,exam_id__2015": 0.40336134453781514, + "acc,exam_id__2016_2": 0.4796747967479675, + "acc,exam_id__2022": 0.518796992481203 + }, + "faquad_nli": { + "f1_macro,all": 0.5713532115853199, + "acc,all": 0.5676923076923077, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8272202378666258, + "acc,all": 0.8278571428571428 + }, + "oab_exams": { + "acc,all": 0.35170842824601367, + "acc,exam_id__2016-20a": 0.35, + "acc,exam_id__2011-03": 0.3434343434343434, + "acc,exam_id__2012-07": 0.375, + "acc,exam_id__2015-17": 0.38461538461538464, + "acc,exam_id__2014-13": 0.3375, + "acc,exam_id__2012-08": 0.3375, + "acc,exam_id__2013-10": 0.375, + "acc,exam_id__2014-15": 0.41025641025641024, + "acc,exam_id__2016-20": 0.3375, + "acc,exam_id__2017-24": 0.2375, + "acc,exam_id__2016-19": 0.4230769230769231, + "acc,exam_id__2010-02": 0.35, + "acc,exam_id__2017-22": 0.4375, + "acc,exam_id__2012-06a": 0.425, + "acc,exam_id__2018-25": 0.35, + "acc,exam_id__2014-14": 0.35, + "acc,exam_id__2010-01": 0.3176470588235294, + "acc,exam_id__2015-16": 0.3, + "acc,exam_id__2011-05": 0.2875, + "acc,exam_id__2013-11": 0.35, + "acc,exam_id__2013-12": 
0.35, + "acc,exam_id__2012-09": 0.3246753246753247, + "acc,exam_id__2012-06": 0.375, + "acc,exam_id__2017-23": 0.3375, + "acc,exam_id__2011-04": 0.375, + "acc,exam_id__2016-21": 0.325, + "acc,exam_id__2015-18": 0.3375, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.4432352615659635, + "acc,all": 0.4512338425381904 + }, + "tweetsentbr": { + "f1_macro,all": 0.601954964588851, + "acc,all": 0.6248756218905472, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "c118185bcd59d54c263108c24ddeadd70f577435", - 
"model_dtype": "torch.bfloat16", - "model_memory_footprint": 17612232704, - "model_num_parameters": 8537680896, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 4, - "max_length": 4096, - "max_ctx_length": 4064, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1008.8839869281046, - "min_seq_length": 994, - "max_seq_length": 1051, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1217.8839869281046, - "min_seq_length": 1203, - "max_seq_length": 1260, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1373.4464534075105, - "min_seq_length": 1080, - "max_seq_length": 1993, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1215.8278516445066, - "min_seq_length": 1019, - "max_seq_length": 2062, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1094.1338461538462, - "min_seq_length": 1056, - "max_seq_length": 1167, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 896.505, - "min_seq_length": 881, - "max_seq_length": 1085, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "c118185bcd59d54c263108c24ddeadd70f577435", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 17612232704, + "model_num_parameters": 8537680896, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 4, + "max_length": 4096, + 
"max_ctx_length": 4064, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 987.4004555808656, - "min_seq_length": 791, - "max_seq_length": 1322, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1008.8839869281046, + "min_seq_length": 994, + "max_seq_length": 1051, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1217.8839869281046, + "min_seq_length": 1203, + "max_seq_length": 1260, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1373.4464534075105, + "min_seq_length": 1080, + "max_seq_length": 1993, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1215.8278516445066, + "min_seq_length": 1019, + "max_seq_length": 2062, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1094.1338461538462, + "min_seq_length": 1056, + "max_seq_length": 1167, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 896.505, + "min_seq_length": 881, + "max_seq_length": 1085, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 987.4004555808656, + "min_seq_length": 791, + "max_seq_length": 1322, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1272.13866039953, + "min_seq_length": 1244, + "max_seq_length": 1303, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 
1101.9791044776118, + "min_seq_length": 1087, + "max_seq_length": 1144, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1272.13866039953, - "min_seq_length": 1244, - "max_seq_length": 1303, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=eduagarcia/gemma-7b-it_no_chat_template,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1101.9791044776118, - "min_seq_length": 1087, - "max_seq_length": 1144, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=eduagarcia/gemma-7b-it_no_chat_template,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "804df15" + "git_hash": "804df15" } \ No newline at end of file diff --git a/eduagarcia/gemma-7b-it_no_chat_template/results_2024-02-23T22-19-59.816101.json b/eduagarcia/gemma-7b-it_no_chat_template/results_2024-02-23T22-19-59.816101.json index 0f3cea92a50b69309ab45f829726f4ac2916aeb8..ffc9d287e61e8b58f2d95fa98bbcf6d7b110dd6b 100644 --- a/eduagarcia/gemma-7b-it_no_chat_template/results_2024-02-23T22-19-59.816101.json +++ b/eduagarcia/gemma-7b-it_no_chat_template/results_2024-02-23T22-19-59.816101.json @@ -34,15 +34,15 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.5516779270390081, - "all_grouped_npm": 0.32000456969789404, + "all_grouped_average": 0.5728391570977237, + "all_grouped_npm": 0.35890388965876824, "all_grouped": { "enem_challenge": 0.470258922323303, "bluex": 0.36022253129346316, "oab_exams": 0.35170842824601367, "assin2_rte": 0.8471090065645221, "assin2_sts": 0.6824898498454515, - "faquad_nli": 0.38090214105688, + "faquad_nli": 0.5713532115853199, "hatebr_offensive": 0.8272202378666258, "portuguese_hate_speech": 0.4432352615659635, "tweetsentbr": 0.601954964588851 @@ -53,7 +53,7 @@ "harness|oab_exams|oab_exams|None|3": 0.35170842824601367, "harness|assin2_rte|assin2_rte|None|15": 0.8471090065645221, "harness|assin2_sts|assin2_sts|None|15": 0.6824898498454515, - "harness|faquad_nli|faquad_nli|None|15": 0.38090214105688, + "harness|faquad_nli|faquad_nli|None|15": 0.5713532115853199, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8272202378666258, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.4432352615659635, "harness|tweetsentbr|tweetsentbr|None|25": 0.601954964588851 @@ -135,9 +135,9 @@ "main_score": 0.6824898498454515 }, 
"harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.38090214105688, + "f1_macro,all": 0.5713532115853199, "acc,all": 0.5676923076923077, - "main_score": 0.38090214105688 + "main_score": 0.5713532115853199 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { "f1_macro,all": 0.8272202378666258, diff --git a/eldogbbhed/Peagle-9b/raw_2024-05-19T21-23-39.265807/results.json b/eldogbbhed/Peagle-9b/raw_2024-05-19T21-23-39.265807/results.json index c38942164280067d029c31f35faf8ce0e69735f1..307eabc8874409442295a43a76b530b435b182d3 100644 --- a/eldogbbhed/Peagle-9b/raw_2024-05-19T21-23-39.265807/results.json +++ b/eldogbbhed/Peagle-9b/raw_2024-05-19T21-23-39.265807/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.5784120668038546, - "acc,all": 0.6237745098039216, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.5522260924205177, - "mse,all": 0.7922879356651821, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.4993045897079277, - "acc,exam_id__USP_2019": 0.4, - "acc,exam_id__USP_2022": 0.5306122448979592, - "acc,exam_id__UNICAMP_2023": 0.5116279069767442, - "acc,exam_id__USP_2023": 0.5454545454545454, - "acc,exam_id__USP_2020": 0.48214285714285715, - "acc,exam_id__UNICAMP_2024": 0.4222222222222222, - "acc,exam_id__USP_2021": 0.46153846153846156, - "acc,exam_id__UNICAMP_2021_1": 0.5, - "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, - "acc,exam_id__UNICAMP_2022": 0.5384615384615384, - "acc,exam_id__USP_2018": 0.5, - "acc,exam_id__USP_2024": 0.6585365853658537, - "acc,exam_id__UNICAMP_2019": 0.48, - "acc,exam_id__UNICAMP_2020": 0.5454545454545454, - "acc,exam_id__UNICAMP_2018": 0.4074074074074074, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.5640307907627712, - "acc,exam_id__2016": 0.5371900826446281, - "acc,exam_id__2013": 0.5833333333333334, - "acc,exam_id__2011": 0.5982905982905983, - "acc,exam_id__2009": 0.5391304347826087, - "acc,exam_id__2016_2": 0.6097560975609756, - "acc,exam_id__2022": 0.5338345864661654, - "acc,exam_id__2014": 0.5412844036697247, - "acc,exam_id__2015": 0.5546218487394958, - "acc,exam_id__2017": 0.5775862068965517, - "acc,exam_id__2010": 0.5811965811965812, - "acc,exam_id__2012": 0.5086206896551724, - "acc,exam_id__2023": 0.6 - }, - "faquad_nli": { - "f1_macro,all": 0.5708602851459994, - "acc,all": 0.7569230769230769, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.5393188795961035, - "acc,all": 0.6021428571428571 - }, - "oab_exams": { - "acc,all": 0.3958997722095672, - "acc,exam_id__2011-05": 0.425, - "acc,exam_id__2016-21": 0.375, - "acc,exam_id__2017-22": 0.45, - "acc,exam_id__2013-11": 0.45, - "acc,exam_id__2018-25": 0.375, - "acc,exam_id__2012-09": 0.33766233766233766, - "acc,exam_id__2014-13": 0.3625, - "acc,exam_id__2015-17": 0.5, - "acc,exam_id__2017-24": 0.375, - "acc,exam_id__2014-15": 0.47435897435897434, - "acc,exam_id__2017-23": 0.3875, - "acc,exam_id__2011-03": 0.35353535353535354, - "acc,exam_id__2012-07": 0.35, - "acc,exam_id__2012-08": 0.325, - "acc,exam_id__2012-06a": 0.3625, - "acc,exam_id__2010-01": 0.3411764705882353, - "acc,exam_id__2015-16": 0.3875, - "acc,exam_id__2016-19": 0.44871794871794873, - "acc,exam_id__2015-18": 0.3875, - "acc,exam_id__2012-06": 0.475, - "acc,exam_id__2014-14": 0.4625, - "acc,exam_id__2013-10": 0.3125, - "acc,exam_id__2013-12": 0.4375, - "acc,exam_id__2016-20": 0.375, - "acc,exam_id__2011-04": 0.4, - "acc,exam_id__2010-02": 0.42, - 
"acc,exam_id__2016-20a": 0.35, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.5769279068047211, - "acc,all": 0.6603995299647474 - }, - "tweetsentbr": { - "f1_macro,all": 0.39313664705263035, - "acc,all": 0.5437810945273632, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.5784120668038546, + "acc,all": 0.6237745098039216, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.5522260924205177, + "mse,all": 0.7922879356651821, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.4993045897079277, + "acc,exam_id__USP_2019": 0.4, + "acc,exam_id__USP_2022": 0.5306122448979592, + "acc,exam_id__UNICAMP_2023": 0.5116279069767442, + "acc,exam_id__USP_2023": 0.5454545454545454, + "acc,exam_id__USP_2020": 0.48214285714285715, + "acc,exam_id__UNICAMP_2024": 0.4222222222222222, + "acc,exam_id__USP_2021": 0.46153846153846156, + "acc,exam_id__UNICAMP_2021_1": 0.5, + "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, + "acc,exam_id__UNICAMP_2022": 0.5384615384615384, + "acc,exam_id__USP_2018": 0.5, + "acc,exam_id__USP_2024": 0.6585365853658537, + "acc,exam_id__UNICAMP_2019": 0.48, + "acc,exam_id__UNICAMP_2020": 0.5454545454545454, + "acc,exam_id__UNICAMP_2018": 0.4074074074074074, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.5640307907627712, + "acc,exam_id__2016": 0.5371900826446281, + "acc,exam_id__2013": 0.5833333333333334, + "acc,exam_id__2011": 0.5982905982905983, + "acc,exam_id__2009": 0.5391304347826087, + "acc,exam_id__2016_2": 0.6097560975609756, + "acc,exam_id__2022": 0.5338345864661654, + "acc,exam_id__2014": 0.5412844036697247, + "acc,exam_id__2015": 0.5546218487394958, + "acc,exam_id__2017": 0.5775862068965517, + "acc,exam_id__2010": 0.5811965811965812, + "acc,exam_id__2012": 0.5086206896551724, + "acc,exam_id__2023": 0.6 + }, + "faquad_nli": { + "f1_macro,all": 0.5708602851459994, + "acc,all": 0.7569230769230769, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.5393188795961035, + "acc,all": 0.6021428571428571 + }, + "oab_exams": { + "acc,all": 0.3958997722095672, + "acc,exam_id__2011-05": 
0.425, + "acc,exam_id__2016-21": 0.375, + "acc,exam_id__2017-22": 0.45, + "acc,exam_id__2013-11": 0.45, + "acc,exam_id__2018-25": 0.375, + "acc,exam_id__2012-09": 0.33766233766233766, + "acc,exam_id__2014-13": 0.3625, + "acc,exam_id__2015-17": 0.5, + "acc,exam_id__2017-24": 0.375, + "acc,exam_id__2014-15": 0.47435897435897434, + "acc,exam_id__2017-23": 0.3875, + "acc,exam_id__2011-03": 0.35353535353535354, + "acc,exam_id__2012-07": 0.35, + "acc,exam_id__2012-08": 0.325, + "acc,exam_id__2012-06a": 0.3625, + "acc,exam_id__2010-01": 0.3411764705882353, + "acc,exam_id__2015-16": 0.3875, + "acc,exam_id__2016-19": 0.44871794871794873, + "acc,exam_id__2015-18": 0.3875, + "acc,exam_id__2012-06": 0.475, + "acc,exam_id__2014-14": 0.4625, + "acc,exam_id__2013-10": 0.3125, + "acc,exam_id__2013-12": 0.4375, + "acc,exam_id__2016-20": 0.375, + "acc,exam_id__2011-04": 0.4, + "acc,exam_id__2010-02": 0.42, + "acc,exam_id__2016-20a": 0.35, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.5769279068047211, + "acc,all": 0.6603995299647474 + }, + "tweetsentbr": { + "f1_macro,all": 0.5241821960701739, + "acc,all": 0.5437810945273632, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "3367f742a38a3d7a44594088d8805e781fd34136", - "model_dtype": "torch.float16", - "model_memory_footprint": 18644355072, - "model_num_parameters": 8986628096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1531.7455065359477, - "min_seq_length": 1508, - "max_seq_length": 1598, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1740.7455065359477, - "min_seq_length": 1717, - "max_seq_length": 1807, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1761.9262865090404, - "min_seq_length": 1385, - "max_seq_length": 2562, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1662.039188243527, - "min_seq_length": 1396, - "max_seq_length": 2660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1756.9876923076922, - "min_seq_length": 1701, - "max_seq_length": 1877, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "3367f742a38a3d7a44594088d8805e781fd34136", + "model_dtype": "torch.float16", + "model_memory_footprint": 18644355072, + "model_num_parameters": 8986628096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1567.3878571428572, - "min_seq_length": 1544, - "max_seq_length": 1818, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1407.764464692483, - "min_seq_length": 1141, - "max_seq_length": 1910, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1531.7455065359477, + "min_seq_length": 1508, + "max_seq_length": 1598, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1740.7455065359477, + "min_seq_length": 1717, + "max_seq_length": 1807, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1761.9262865090404, + "min_seq_length": 1385, + "max_seq_length": 2562, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1662.039188243527, + "min_seq_length": 1396, + "max_seq_length": 
2660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1756.9876923076922, + "min_seq_length": 1701, + "max_seq_length": 1877, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1567.3878571428572, + "min_seq_length": 1544, + "max_seq_length": 1818, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1407.764464692483, + "min_seq_length": 1141, + "max_seq_length": 1910, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2068.3360752056406, + "min_seq_length": 2033, + "max_seq_length": 2107, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1814.2492537313433, + "min_seq_length": 1793, + "max_seq_length": 1909, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2068.3360752056406, - "min_seq_length": 2033, - "max_seq_length": 2107, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=eldogbbhed/Peagle-9b,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1814.2492537313433, - "min_seq_length": 1793, - "max_seq_length": 1909, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=eldogbbhed/Peagle-9b,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - 
"gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/eldogbbhed/Peagle-9b/results_2024-05-19T21-23-39.265807.json b/eldogbbhed/Peagle-9b/results_2024-05-19T21-23-39.265807.json index 70eb5c0cf0576b9a1cd15801c4aba42d3dcbe81f..db7944df69b76a0421f15773f4f9b8aefb7e0115 100644 --- a/eldogbbhed/Peagle-9b/results_2024-05-19T21-23-39.265807.json +++ b/eldogbbhed/Peagle-9b/results_2024-05-19T21-23-39.265807.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.5189018922782326, - "all_grouped_npm": 0.25413690079111956, + "all_grouped_average": 0.5334625088357374, + "all_grouped_npm": 0.27580448495407317, "all_grouped": { "enem_challenge": 0.5640307907627712, "bluex": 0.4993045897079277, @@ -45,7 +45,7 @@ "faquad_nli": 0.5708602851459994, "hatebr_offensive": 0.5393188795961035, "portuguese_hate_speech": 0.5769279068047211, - "tweetsentbr": 0.39313664705263035 + "tweetsentbr": 0.5241821960701739 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.5640307907627712, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.5708602851459994, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.5393188795961035, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.5769279068047211, - "harness|tweetsentbr|tweetsentbr|None|25": 0.39313664705263035 + "harness|tweetsentbr|tweetsentbr|None|25": 0.5241821960701739 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.5640307907627712, @@ -150,9 +150,9 @@ "main_score": 0.5769279068047211 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.39313664705263035, + "f1_macro,all": 0.5241821960701739, "acc,all": 0.5437810945273632, - "main_score": 0.39313664705263035 + "main_score": 0.5241821960701739 } }, "config_tasks": { diff --git a/failspy/Meta-Llama-3-8B-Instruct-abliterated-v3/raw_2024-05-27T12-30-50.126635/results.json b/failspy/Meta-Llama-3-8B-Instruct-abliterated-v3/raw_2024-05-27T12-30-50.126635/results.json index 99fddf87240d756f1e5a2ce9f99dd73a95716c7c..1199b559367365b21c70e0da43e8c9be6ee6d430 100644 --- a/failspy/Meta-Llama-3-8B-Instruct-abliterated-v3/raw_2024-05-27T12-30-50.126635/results.json +++ b/failspy/Meta-Llama-3-8B-Instruct-abliterated-v3/raw_2024-05-27T12-30-50.126635/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9073768010736127, - "acc,all": 0.9076797385620915, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7546147599967769, - "mse,all": 0.5846772875816993, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5799721835883171, - "acc,exam_id__UNICAMP_2018": 0.5555555555555556, - "acc,exam_id__UNICAMP_2023": 0.6976744186046512, - "acc,exam_id__USP_2023": 0.7045454545454546, - "acc,exam_id__UNICAMP_2024": 0.5555555555555556, - "acc,exam_id__USP_2024": 0.6097560975609756, - "acc,exam_id__UNICAMP_2021_1": 0.5, - "acc,exam_id__USP_2020": 0.5714285714285714, - "acc,exam_id__UNICAMP_2020": 0.5818181818181818, - "acc,exam_id__UNICAMP_2022": 0.5897435897435898, - "acc,exam_id__UNICAMP_2019": 0.54, - "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373, - "acc,exam_id__USP_2018": 0.5, - "acc,exam_id__USP_2021": 0.6153846153846154, - "acc,exam_id__USP_2019": 0.6, - "acc,exam_id__USP_2022": 0.5714285714285714, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.7067879636109167, - "acc,exam_id__2011": 0.7350427350427351, - "acc,exam_id__2017": 0.6637931034482759, - "acc,exam_id__2015": 0.7226890756302521, - 
"acc,exam_id__2016": 0.6776859504132231, - "acc,exam_id__2016_2": 0.6829268292682927, - "acc,exam_id__2009": 0.7217391304347827, - "acc,exam_id__2012": 0.7068965517241379, - "acc,exam_id__2010": 0.717948717948718, - "acc,exam_id__2013": 0.6944444444444444, - "acc,exam_id__2014": 0.7064220183486238, - "acc,exam_id__2022": 0.6616541353383458, - "acc,exam_id__2023": 0.7851851851851852 - }, - "faquad_nli": { - "f1_macro,all": 0.755329706053451, - "acc,all": 0.796923076923077, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.869595212895539, - "acc,all": 0.87 - }, - "oab_exams": { - "acc,all": 0.5020501138952164, - "acc,exam_id__2011-03": 0.43434343434343436, - "acc,exam_id__2014-13": 0.4375, - "acc,exam_id__2013-10": 0.4625, - "acc,exam_id__2017-24": 0.4125, - "acc,exam_id__2017-22": 0.5875, - "acc,exam_id__2012-06a": 0.5375, - "acc,exam_id__2016-20a": 0.4, - "acc,exam_id__2012-09": 0.4155844155844156, - "acc,exam_id__2015-16": 0.475, - "acc,exam_id__2011-04": 0.4875, - "acc,exam_id__2012-07": 0.5, - "acc,exam_id__2014-14": 0.575, - "acc,exam_id__2014-15": 0.5384615384615384, - "acc,exam_id__2010-02": 0.57, - "acc,exam_id__2015-18": 0.5, - "acc,exam_id__2016-19": 0.5512820512820513, - "acc,exam_id__2012-06": 0.5625, - "acc,exam_id__2013-12": 0.575, - "acc,exam_id__2011-05": 0.4625, - "acc,exam_id__2017-23": 0.5125, - "acc,exam_id__2013-11": 0.5375, - "acc,exam_id__2016-20": 0.5125, - "acc,exam_id__2016-21": 0.4, - "acc,exam_id__2018-25": 0.5125, - "acc,exam_id__2010-01": 0.4, - "acc,exam_id__2012-08": 0.5625, - "acc,exam_id__2015-17": 0.6410256410256411, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.623237436514301, - "acc,all": 0.6263219741480611 - }, - "tweetsentbr": { - "f1_macro,all": 0.49494809257815886, - "acc,all": 0.7199004975124378, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9073768010736127, + "acc,all": 0.9076797385620915, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7546147599967769, + "mse,all": 0.5846772875816993, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5799721835883171, + "acc,exam_id__UNICAMP_2018": 0.5555555555555556, + "acc,exam_id__UNICAMP_2023": 0.6976744186046512, + "acc,exam_id__USP_2023": 0.7045454545454546, + "acc,exam_id__UNICAMP_2024": 0.5555555555555556, + "acc,exam_id__USP_2024": 0.6097560975609756, + "acc,exam_id__UNICAMP_2021_1": 0.5, + "acc,exam_id__USP_2020": 0.5714285714285714, + "acc,exam_id__UNICAMP_2020": 0.5818181818181818, + "acc,exam_id__UNICAMP_2022": 0.5897435897435898, + "acc,exam_id__UNICAMP_2019": 0.54, + "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373, + "acc,exam_id__USP_2018": 0.5, + "acc,exam_id__USP_2021": 0.6153846153846154, + "acc,exam_id__USP_2019": 0.6, + "acc,exam_id__USP_2022": 0.5714285714285714, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.7067879636109167, + "acc,exam_id__2011": 0.7350427350427351, + "acc,exam_id__2017": 0.6637931034482759, + "acc,exam_id__2015": 0.7226890756302521, + "acc,exam_id__2016": 0.6776859504132231, + "acc,exam_id__2016_2": 0.6829268292682927, + "acc,exam_id__2009": 0.7217391304347827, + "acc,exam_id__2012": 0.7068965517241379, + "acc,exam_id__2010": 0.717948717948718, + "acc,exam_id__2013": 0.6944444444444444, + "acc,exam_id__2014": 0.7064220183486238, + "acc,exam_id__2022": 0.6616541353383458, + "acc,exam_id__2023": 0.7851851851851852 + }, + "faquad_nli": { + "f1_macro,all": 0.755329706053451, + "acc,all": 0.796923076923077, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.869595212895539, + "acc,all": 0.87 + }, + "oab_exams": { + "acc,all": 0.5020501138952164, + "acc,exam_id__2011-03": 0.43434343434343436, + "acc,exam_id__2014-13": 0.4375, + "acc,exam_id__2013-10": 0.4625, + "acc,exam_id__2017-24": 0.4125, + "acc,exam_id__2017-22": 0.5875, + "acc,exam_id__2012-06a": 0.5375, + "acc,exam_id__2016-20a": 0.4, + "acc,exam_id__2012-09": 0.4155844155844156, + "acc,exam_id__2015-16": 0.475, + "acc,exam_id__2011-04": 0.4875, + "acc,exam_id__2012-07": 0.5, + "acc,exam_id__2014-14": 0.575, + "acc,exam_id__2014-15": 0.5384615384615384, + "acc,exam_id__2010-02": 0.57, + "acc,exam_id__2015-18": 0.5, + "acc,exam_id__2016-19": 0.5512820512820513, + "acc,exam_id__2012-06": 0.5625, + "acc,exam_id__2013-12": 0.575, + "acc,exam_id__2011-05": 0.4625, + "acc,exam_id__2017-23": 0.5125, + "acc,exam_id__2013-11": 0.5375, + "acc,exam_id__2016-20": 0.5125, + "acc,exam_id__2016-21": 0.4, + 
"acc,exam_id__2018-25": 0.5125, + "acc,exam_id__2010-01": 0.4, + "acc,exam_id__2012-08": 0.5625, + "acc,exam_id__2015-17": 0.6410256410256411, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.623237436514301, + "acc,all": 0.6263219741480611 + }, + "tweetsentbr": { + "f1_macro,all": 0.6599307901042119, + "acc,all": 0.7199004975124378, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "43ac6fc3c097874a05a1460c054992c591a9a553", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 16060530688, - "model_num_parameters": 8030261248, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1318.5322712418301, - "min_seq_length": 1299, - "max_seq_length": 1382, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1509.5322712418301, - "min_seq_length": 1490, - "max_seq_length": 1573, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1484.7719054242002, - "min_seq_length": 1165, - "max_seq_length": 2134, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1412.3547935619315, - "min_seq_length": 1187, - "max_seq_length": 2340, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1447.8215384615385, - "min_seq_length": 1402, - "max_seq_length": 1544, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "43ac6fc3c097874a05a1460c054992c591a9a553", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 16060530688, + "model_num_parameters": 8030261248, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1279.3878571428572, - "min_seq_length": 1259, - "max_seq_length": 1498, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1220.3772209567198, - "min_seq_length": 988, - "max_seq_length": 1654, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1318.5322712418301, + "min_seq_length": 1299, + "max_seq_length": 1382, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1509.5322712418301, + "min_seq_length": 1490, + "max_seq_length": 1573, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1484.7719054242002, + "min_seq_length": 1165, + "max_seq_length": 2134, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1412.3547935619315, + "min_seq_length": 1187, + "max_seq_length": 2340, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1447.8215384615385, + "min_seq_length": 1402, + "max_seq_length": 1544, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1279.3878571428572, + "min_seq_length": 1259, + "max_seq_length": 1498, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1220.3772209567198, + "min_seq_length": 988, + "max_seq_length": 1654, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1676.4195064629848, + "min_seq_length": 1646, + "max_seq_length": 1708, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1537.1537313432837, + "min_seq_length": 1520, + "max_seq_length": 1585, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1676.4195064629848, - "min_seq_length": 1646, - "max_seq_length": 1708, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=failspy/Meta-Llama-3-8B-Instruct-abliterated-v3,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1537.1537313432837, - "min_seq_length": 1520, - "max_seq_length": 1585, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=failspy/Meta-Llama-3-8B-Instruct-abliterated-v3,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - 
"gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/failspy/Meta-Llama-3-8B-Instruct-abliterated-v3/results_2024-05-27T12-30-50.126635.json b/failspy/Meta-Llama-3-8B-Instruct-abliterated-v3/results_2024-05-27T12-30-50.126635.json index 62c931785effd312e662707eb6c7201d87fb31d2..d179b0c3918a827bc3cbb5311bf1902ea84b4884 100644 --- a/failspy/Meta-Llama-3-8B-Instruct-abliterated-v3/results_2024-05-27T12-30-50.126635.json +++ b/failspy/Meta-Llama-3-8B-Instruct-abliterated-v3/results_2024-05-27T12-30-50.126635.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6882124744673654, - "all_grouped_npm": 0.5346288404173541, + "all_grouped_average": 0.7065438853035935, + "all_grouped_npm": 0.5619077255903127, "all_grouped": { "enem_challenge": 0.7067879636109167, "bluex": 0.5799721835883171, @@ -45,7 +45,7 @@ "faquad_nli": 0.755329706053451, "hatebr_offensive": 0.869595212895539, "portuguese_hate_speech": 0.623237436514301, - "tweetsentbr": 0.49494809257815886 + "tweetsentbr": 0.6599307901042119 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.7067879636109167, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.755329706053451, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.869595212895539, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.623237436514301, - "harness|tweetsentbr|tweetsentbr|None|25": 0.49494809257815886 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6599307901042119 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.7067879636109167, @@ -150,9 +150,9 @@ "main_score": 0.623237436514301 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.49494809257815886, + "f1_macro,all": 0.6599307901042119, "acc,all": 0.7199004975124378, - "main_score": 0.49494809257815886 + "main_score": 0.6599307901042119 } }, "config_tasks": { diff --git a/failspy/Phi-3-medium-4k-instruct-abliterated-v3/raw_2024-05-25T08-02-58.396353/results.json b/failspy/Phi-3-medium-4k-instruct-abliterated-v3/raw_2024-05-25T08-02-58.396353/results.json index a7974156608846e5ecafe3e1f592b40e2dd60f67..01442df1021f9cbb6e3d2e7fd0044115099b9aa6 100644 --- a/failspy/Phi-3-medium-4k-instruct-abliterated-v3/raw_2024-05-25T08-02-58.396353/results.json +++ b/failspy/Phi-3-medium-4k-instruct-abliterated-v3/raw_2024-05-25T08-02-58.396353/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9256174008161892, - "acc,all": 0.9256535947712419, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.8022099861362467, - "mse,all": 0.4308006535947712, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5924895688456189, - "acc,exam_id__USP_2018": 0.5555555555555556, - "acc,exam_id__UNICAMP_2019": 0.6, - "acc,exam_id__USP_2020": 0.5714285714285714, - "acc,exam_id__USP_2021": 0.5961538461538461, - "acc,exam_id__UNICAMP_2024": 0.5777777777777777, - "acc,exam_id__USP_2022": 0.46938775510204084, - "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, - "acc,exam_id__UNICAMP_2023": 0.5813953488372093, - "acc,exam_id__UNICAMP_2022": 0.7435897435897436, - "acc,exam_id__USP_2019": 0.525, - "acc,exam_id__UNICAMP_2021_1": 0.5, - "acc,exam_id__UNICAMP_2018": 0.5555555555555556, - "acc,exam_id__USP_2024": 0.7317073170731707, - "acc,exam_id__USP_2023": 0.75, - "acc,exam_id__UNICAMP_2020": 0.6181818181818182, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6962911126662001, - "acc,exam_id__2011": 
0.7435897435897436, - "acc,exam_id__2022": 0.6842105263157895, - "acc,exam_id__2014": 0.7155963302752294, - "acc,exam_id__2012": 0.6810344827586207, - "acc,exam_id__2015": 0.680672268907563, - "acc,exam_id__2016_2": 0.6829268292682927, - "acc,exam_id__2016": 0.7272727272727273, - "acc,exam_id__2010": 0.7094017094017094, - "acc,exam_id__2009": 0.6173913043478261, - "acc,exam_id__2013": 0.6574074074074074, - "acc,exam_id__2023": 0.725925925925926, - "acc,exam_id__2017": 0.7241379310344828 - }, - "faquad_nli": { - "f1_macro,all": 0.7299937983407636, - "acc,all": 0.7815384615384615, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7935385931129845, - "acc,all": 0.8007142857142857 - }, - "oab_exams": { - "acc,all": 0.47380410022779046, - "acc,exam_id__2014-15": 0.5769230769230769, - "acc,exam_id__2013-10": 0.4875, - "acc,exam_id__2016-20": 0.45, - "acc,exam_id__2016-21": 0.3625, - "acc,exam_id__2012-07": 0.45, - "acc,exam_id__2018-25": 0.4375, - "acc,exam_id__2011-03": 0.4444444444444444, - "acc,exam_id__2015-17": 0.5384615384615384, - "acc,exam_id__2010-02": 0.44, - "acc,exam_id__2015-16": 0.4125, - "acc,exam_id__2011-04": 0.35, - "acc,exam_id__2013-11": 0.525, - "acc,exam_id__2016-20a": 0.4375, - "acc,exam_id__2017-23": 0.475, - "acc,exam_id__2014-13": 0.5125, - "acc,exam_id__2011-05": 0.5625, - "acc,exam_id__2013-12": 0.5625, - "acc,exam_id__2014-14": 0.5375, - "acc,exam_id__2012-06a": 0.425, - "acc,exam_id__2017-22": 0.475, - "acc,exam_id__2012-08": 0.525, - "acc,exam_id__2016-19": 0.5, - "acc,exam_id__2010-01": 0.43529411764705883, - "acc,exam_id__2017-24": 0.55, - "acc,exam_id__2012-09": 0.37662337662337664, - "acc,exam_id__2015-18": 0.5, - "acc,exam_id__2012-06": 0.4625, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.720937858665355, - "acc,all": 0.7931844888366627 - }, - "tweetsentbr": { - "f1_macro,all": 0.4683637947210867, - "acc,all": 0.6756218905472637, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9256174008161892, + "acc,all": 0.9256535947712419, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.8022099861362467, + "mse,all": 0.4308006535947712, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5924895688456189, + "acc,exam_id__USP_2018": 0.5555555555555556, + "acc,exam_id__UNICAMP_2019": 0.6, + "acc,exam_id__USP_2020": 0.5714285714285714, + "acc,exam_id__USP_2021": 0.5961538461538461, + "acc,exam_id__UNICAMP_2024": 0.5777777777777777, + "acc,exam_id__USP_2022": 0.46938775510204084, + "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, + "acc,exam_id__UNICAMP_2023": 0.5813953488372093, + "acc,exam_id__UNICAMP_2022": 0.7435897435897436, + "acc,exam_id__USP_2019": 0.525, + "acc,exam_id__UNICAMP_2021_1": 0.5, + "acc,exam_id__UNICAMP_2018": 0.5555555555555556, + "acc,exam_id__USP_2024": 0.7317073170731707, + "acc,exam_id__USP_2023": 0.75, + "acc,exam_id__UNICAMP_2020": 0.6181818181818182, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6962911126662001, + "acc,exam_id__2011": 0.7435897435897436, + "acc,exam_id__2022": 0.6842105263157895, + "acc,exam_id__2014": 0.7155963302752294, + "acc,exam_id__2012": 0.6810344827586207, + "acc,exam_id__2015": 0.680672268907563, + "acc,exam_id__2016_2": 0.6829268292682927, + "acc,exam_id__2016": 0.7272727272727273, + "acc,exam_id__2010": 0.7094017094017094, + "acc,exam_id__2009": 0.6173913043478261, + "acc,exam_id__2013": 0.6574074074074074, + "acc,exam_id__2023": 0.725925925925926, + "acc,exam_id__2017": 0.7241379310344828 + }, + "faquad_nli": { + "f1_macro,all": 0.7299937983407636, + "acc,all": 0.7815384615384615, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7935385931129845, + "acc,all": 0.8007142857142857 + }, + "oab_exams": { + "acc,all": 0.47380410022779046, + "acc,exam_id__2014-15": 0.5769230769230769, + "acc,exam_id__2013-10": 0.4875, + "acc,exam_id__2016-20": 0.45, + "acc,exam_id__2016-21": 0.3625, + "acc,exam_id__2012-07": 0.45, + "acc,exam_id__2018-25": 0.4375, + "acc,exam_id__2011-03": 0.4444444444444444, + "acc,exam_id__2015-17": 0.5384615384615384, + "acc,exam_id__2010-02": 0.44, + "acc,exam_id__2015-16": 0.4125, + "acc,exam_id__2011-04": 0.35, + "acc,exam_id__2013-11": 0.525, + "acc,exam_id__2016-20a": 0.4375, + "acc,exam_id__2017-23": 0.475, + "acc,exam_id__2014-13": 0.5125, + "acc,exam_id__2011-05": 0.5625, + "acc,exam_id__2013-12": 0.5625, + "acc,exam_id__2014-14": 0.5375, + "acc,exam_id__2012-06a": 0.425, + "acc,exam_id__2017-22": 0.475, + "acc,exam_id__2012-08": 0.525, + "acc,exam_id__2016-19": 0.5, + "acc,exam_id__2010-01": 
0.43529411764705883, + "acc,exam_id__2017-24": 0.55, + "acc,exam_id__2012-09": 0.37662337662337664, + "acc,exam_id__2015-18": 0.5, + "acc,exam_id__2012-06": 0.4625, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.720937858665355, + "acc,all": 0.7931844888366627 + }, + "tweetsentbr": { + "f1_macro,all": 0.6244850596281156, + "acc,all": 0.6756218905472637, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "959b09eacf6cae85a8eb21b25e998addc89a367b", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 27920476160, - "model_num_parameters": 13960238080, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 4, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1268.9889705882354, - "min_seq_length": 1246, - "max_seq_length": 1335, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1499.9889705882354, - "min_seq_length": 1477, - "max_seq_length": 1566, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1609.7426981919332, - "min_seq_length": 1243, - "max_seq_length": 2369, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1488.9881035689293, - "min_seq_length": 1236, - "max_seq_length": 2528, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1478.1184615384616, - "min_seq_length": 1426, - "max_seq_length": 1585, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "959b09eacf6cae85a8eb21b25e998addc89a367b", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 27920476160, + "model_num_parameters": 13960238080, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 4, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1262.9178571428572, - "min_seq_length": 1239, - "max_seq_length": 1509, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1258.4145785876992, - "min_seq_length": 1003, - "max_seq_length": 1740, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1268.9889705882354, + "min_seq_length": 1246, + "max_seq_length": 1335, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1499.9889705882354, + "min_seq_length": 1477, + "max_seq_length": 1566, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1609.7426981919332, + "min_seq_length": 1243, + "max_seq_length": 2369, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1488.9881035689293, + "min_seq_length": 1236, + "max_seq_length": 2528, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1478.1184615384616, + "min_seq_length": 1426, + "max_seq_length": 1585, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1262.9178571428572, + "min_seq_length": 1239, + "max_seq_length": 1509, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1258.4145785876992, + "min_seq_length": 1003, + "max_seq_length": 1740, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1751.801410105758, + "min_seq_length": 1717, + "max_seq_length": 1795, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1518.6845771144278, + "min_seq_length": 1497, + "max_seq_length": 1636, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1751.801410105758, - "min_seq_length": 1717, - "max_seq_length": 1795, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=failspy/Phi-3-medium-4k-instruct-abliterated-v3,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1518.6845771144278, - "min_seq_length": 1497, - "max_seq_length": 1636, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=failspy/Phi-3-medium-4k-instruct-abliterated-v3,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - 
"gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/failspy/Phi-3-medium-4k-instruct-abliterated-v3/results_2024-05-25T08-02-58.396353.json b/failspy/Phi-3-medium-4k-instruct-abliterated-v3/results_2024-05-25T08-02-58.396353.json index 85bc58138fef0980108ca994425eb81308f3001f..dc4d3b4be5d16ee23de435ad1d7bc8db92498253 100644 --- a/failspy/Phi-3-medium-4k-instruct-abliterated-v3/results_2024-05-25T08-02-58.396353.json +++ b/failspy/Phi-3-medium-4k-instruct-abliterated-v3/results_2024-05-25T08-02-58.396353.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6892495792813594, - "all_grouped_npm": 0.5344870305782643, + "all_grouped_average": 0.7065963864932515, + "all_grouped_npm": 0.5603007317864371, "all_grouped": { "enem_challenge": 0.6962911126662001, "bluex": 0.5924895688456189, @@ -45,7 +45,7 @@ "faquad_nli": 0.7299937983407636, "hatebr_offensive": 0.7935385931129845, "portuguese_hate_speech": 0.720937858665355, - "tweetsentbr": 0.4683637947210867 + "tweetsentbr": 0.6244850596281156 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6962911126662001, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7299937983407636, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7935385931129845, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.720937858665355, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4683637947210867 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6244850596281156 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6962911126662001, @@ -150,9 +150,9 @@ "main_score": 0.720937858665355 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4683637947210867, + "f1_macro,all": 0.6244850596281156, "acc,all": 0.6756218905472637, - "main_score": 0.4683637947210867 + "main_score": 0.6244850596281156 } }, "config_tasks": { diff --git a/freewheelin/free-solar-evo-v0.1/raw_2024-08-11T12-35-46.464008/results.json b/freewheelin/free-solar-evo-v0.1/raw_2024-08-11T12-35-46.464008/results.json index b6e54824491380bc963fb67112b1115f64007b6b..74710b10495ee450dc9b70c47fac86ba21fa81bd 100644 --- a/freewheelin/free-solar-evo-v0.1/raw_2024-08-11T12-35-46.464008/results.json +++ b/freewheelin/free-solar-evo-v0.1/raw_2024-08-11T12-35-46.464008/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.4015759802167569, - "acc,all": 0.5020424836601307, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.655402578466106, - "mse,all": 1.0533249392699562, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5479833101529903, - "acc,exam_id__UNICAMP_2018": 0.5555555555555556, - "acc,exam_id__USP_2022": 0.46938775510204084, - "acc,exam_id__UNICAMP_2019": 0.52, - "acc,exam_id__UNICAMP_2020": 0.5454545454545454, - "acc,exam_id__USP_2024": 0.7804878048780488, - "acc,exam_id__UNICAMP_2024": 0.4444444444444444, - "acc,exam_id__USP_2021": 0.5961538461538461, - "acc,exam_id__UNICAMP_2021_2": 0.6078431372549019, - "acc,exam_id__UNICAMP_2021_1": 0.45652173913043476, - "acc,exam_id__UNICAMP_2022": 0.6666666666666666, - "acc,exam_id__USP_2023": 0.6818181818181818, - "acc,exam_id__UNICAMP_2023": 0.6511627906976745, - "acc,exam_id__USP_2018": 0.4074074074074074, - "acc,exam_id__USP_2020": 0.5178571428571429, - "acc,exam_id__USP_2019": 0.375, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6207137858642408, - "acc,exam_id__2011": 0.7008547008547008, - "acc,exam_id__2010": 
0.6495726495726496, - "acc,exam_id__2009": 0.6521739130434783, - "acc,exam_id__2015": 0.6722689075630253, - "acc,exam_id__2017": 0.6551724137931034, - "acc,exam_id__2014": 0.5779816513761468, - "acc,exam_id__2022": 0.5939849624060151, - "acc,exam_id__2012": 0.6293103448275862, - "acc,exam_id__2023": 0.6222222222222222, - "acc,exam_id__2016_2": 0.5691056910569106, - "acc,exam_id__2016": 0.5289256198347108, - "acc,exam_id__2013": 0.6018518518518519 - }, - "faquad_nli": { - "f1_macro,all": 0.2370033914728682, - "acc,all": 0.2569230769230769, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.22757486049436768, - "acc,all": 0.22142857142857142 - }, - "oab_exams": { - "acc,all": 0.47425968109339406, - "acc,exam_id__2017-24": 0.475, - "acc,exam_id__2013-12": 0.45, - "acc,exam_id__2015-18": 0.525, - "acc,exam_id__2018-25": 0.5, - "acc,exam_id__2016-19": 0.44871794871794873, - "acc,exam_id__2010-01": 0.3764705882352941, - "acc,exam_id__2016-20": 0.5375, - "acc,exam_id__2016-21": 0.45, - "acc,exam_id__2011-04": 0.4375, - "acc,exam_id__2014-15": 0.48717948717948717, - "acc,exam_id__2010-02": 0.56, - "acc,exam_id__2013-10": 0.4875, - "acc,exam_id__2012-07": 0.475, - "acc,exam_id__2015-17": 0.6025641025641025, - "acc,exam_id__2013-11": 0.4875, - "acc,exam_id__2012-09": 0.38961038961038963, - "acc,exam_id__2015-16": 0.4125, - "acc,exam_id__2012-08": 0.4875, - "acc,exam_id__2011-03": 0.5353535353535354, - "acc,exam_id__2016-20a": 0.4375, - "acc,exam_id__2014-13": 0.4625, - "acc,exam_id__2017-23": 0.45, - "acc,exam_id__2012-06": 0.4625, - "acc,exam_id__2011-05": 0.425, - "acc,exam_id__2014-14": 0.475, - "acc,exam_id__2012-06a": 0.475, - "acc,exam_id__2017-22": 0.4625, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.2954248366013072, - "acc,all": 0.28672150411280845 - }, - "tweetsentbr": { - "f1_macro,all": 0.46523264622180927, - "acc,all": 0.6597014925373135, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.6023639703251354, + "acc,all": 0.5020424836601307, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.655402578466106, + "mse,all": 1.0533249392699562, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5479833101529903, + "acc,exam_id__UNICAMP_2018": 0.5555555555555556, + "acc,exam_id__USP_2022": 0.46938775510204084, + "acc,exam_id__UNICAMP_2019": 0.52, + "acc,exam_id__UNICAMP_2020": 0.5454545454545454, + "acc,exam_id__USP_2024": 0.7804878048780488, + "acc,exam_id__UNICAMP_2024": 0.4444444444444444, + "acc,exam_id__USP_2021": 0.5961538461538461, + "acc,exam_id__UNICAMP_2021_2": 0.6078431372549019, + "acc,exam_id__UNICAMP_2021_1": 0.45652173913043476, + "acc,exam_id__UNICAMP_2022": 0.6666666666666666, + "acc,exam_id__USP_2023": 0.6818181818181818, + "acc,exam_id__UNICAMP_2023": 0.6511627906976745, + "acc,exam_id__USP_2018": 0.4074074074074074, + "acc,exam_id__USP_2020": 0.5178571428571429, + "acc,exam_id__USP_2019": 0.375, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6207137858642408, + "acc,exam_id__2011": 0.7008547008547008, + "acc,exam_id__2010": 0.6495726495726496, + "acc,exam_id__2009": 0.6521739130434783, + "acc,exam_id__2015": 0.6722689075630253, + "acc,exam_id__2017": 0.6551724137931034, + "acc,exam_id__2014": 0.5779816513761468, + "acc,exam_id__2022": 0.5939849624060151, + "acc,exam_id__2012": 0.6293103448275862, + "acc,exam_id__2023": 0.6222222222222222, + "acc,exam_id__2016_2": 0.5691056910569106, + "acc,exam_id__2016": 0.5289256198347108, + "acc,exam_id__2013": 0.6018518518518519 + }, + "faquad_nli": { + "f1_macro,all": 0.3555050872093023, + "acc,all": 0.2569230769230769, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.34136229074155156, + "acc,all": 0.22142857142857142 + }, + "oab_exams": { + "acc,all": 0.47425968109339406, + "acc,exam_id__2017-24": 0.475, + "acc,exam_id__2013-12": 0.45, + "acc,exam_id__2015-18": 0.525, + "acc,exam_id__2018-25": 0.5, + "acc,exam_id__2016-19": 0.44871794871794873, + "acc,exam_id__2010-01": 0.3764705882352941, + "acc,exam_id__2016-20": 0.5375, + "acc,exam_id__2016-21": 0.45, + "acc,exam_id__2011-04": 0.4375, + "acc,exam_id__2014-15": 0.48717948717948717, + "acc,exam_id__2010-02": 0.56, + "acc,exam_id__2013-10": 0.4875, + "acc,exam_id__2012-07": 0.475, + "acc,exam_id__2015-17": 0.6025641025641025, + "acc,exam_id__2013-11": 0.4875, + "acc,exam_id__2012-09": 0.38961038961038963, + "acc,exam_id__2015-16": 0.4125, + "acc,exam_id__2012-08": 0.4875, + "acc,exam_id__2011-03": 0.5353535353535354, + "acc,exam_id__2016-20a": 0.4375, + 
"acc,exam_id__2014-13": 0.4625, + "acc,exam_id__2017-23": 0.45, + "acc,exam_id__2012-06": 0.4625, + "acc,exam_id__2011-05": 0.425, + "acc,exam_id__2014-14": 0.475, + "acc,exam_id__2012-06a": 0.475, + "acc,exam_id__2017-22": 0.4625, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.44313725490196076, + "acc,all": 0.28672150411280845 + }, + "tweetsentbr": { + "f1_macro,all": 0.6203101949624122, + "acc,all": 0.6597014925373135, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "233efd607ae0abbd7b46eded2ee7889892b7bdbb", - "model_dtype": "torch.float16", - "model_memory_footprint": 21463060736, - "model_num_parameters": 10731524096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 1620.039188243527, 
- "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "233efd607ae0abbd7b46eded2ee7889892b7bdbb", + "model_dtype": "torch.float16", + "model_memory_footprint": 21463060736, + "model_num_parameters": 10731524096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=freewheelin/free-solar-evo-v0.1,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=freewheelin/free-solar-evo-v0.1,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": 
"5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/freewheelin/free-solar-evo-v0.1/results_2024-08-11T12-35-46.464008.json b/freewheelin/free-solar-evo-v0.1/results_2024-08-11T12-35-46.464008.json index 72cc9bdad779b4a70eb9258275d034c55a3a05c4..206d6de5acdc0ea83233df9ad8151fa878b7939c 100644 --- a/freewheelin/free-solar-evo-v0.1/results_2024-08-11T12-35-46.464008.json +++ b/freewheelin/free-solar-evo-v0.1/results_2024-08-11T12-35-46.464008.json @@ -34,29 +34,29 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.43613011895375997, - "all_grouped_npm": 0.0671842721829389, + "all_grouped_average": 0.5178931281907881, + "all_grouped_npm": 0.21843672992767288, "all_grouped": { "enem_challenge": 0.6207137858642408, "bluex": 0.5479833101529903, "oab_exams": 0.47425968109339406, - "assin2_rte": 0.4015759802167569, + "assin2_rte": 0.6023639703251354, "assin2_sts": 0.655402578466106, - "faquad_nli": 0.2370033914728682, - "hatebr_offensive": 0.22757486049436768, - "portuguese_hate_speech": 0.2954248366013072, - "tweetsentbr": 0.46523264622180927 + "faquad_nli": 0.3555050872093023, + "hatebr_offensive": 0.34136229074155156, + "portuguese_hate_speech": 0.44313725490196076, + "tweetsentbr": 0.6203101949624122 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6207137858642408, "harness|bluex|bluex|None|3": 0.5479833101529903, "harness|oab_exams|oab_exams|None|3": 0.47425968109339406, - "harness|assin2_rte|assin2_rte|None|15": 0.4015759802167569, + "harness|assin2_rte|assin2_rte|None|15": 0.6023639703251354, "harness|assin2_sts|assin2_sts|None|15": 0.655402578466106, - "harness|faquad_nli|faquad_nli|None|15": 0.2370033914728682, - "harness|hatebr_offensive|hatebr_offensive|None|25": 0.22757486049436768, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.2954248366013072, - "harness|tweetsentbr|tweetsentbr|None|25": 0.46523264622180927 + "harness|faquad_nli|faquad_nli|None|15": 0.3555050872093023, + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.34136229074155156, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.44313725490196076, + "harness|tweetsentbr|tweetsentbr|None|25": 0.6203101949624122 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6207137858642408, @@ -125,9 +125,9 @@ "main_score": 0.47425968109339406 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.4015759802167569, + "f1_macro,all": 0.6023639703251354, "acc,all": 0.5020424836601307, - "main_score": 0.4015759802167569 + "main_score": 0.6023639703251354 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.655402578466106, @@ -135,24 +135,24 @@ "main_score": 0.655402578466106 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.2370033914728682, + "f1_macro,all": 0.3555050872093023, "acc,all": 0.2569230769230769, - "main_score": 0.2370033914728682 + "main_score": 0.3555050872093023 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.22757486049436768, + "f1_macro,all": 0.34136229074155156, "acc,all": 0.22142857142857142, - "main_score": 0.22757486049436768 + "main_score": 0.34136229074155156 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.2954248366013072, + "f1_macro,all": 0.44313725490196076, "acc,all": 0.28672150411280845, - "main_score": 0.2954248366013072 + "main_score": 0.44313725490196076 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.46523264622180927, + "f1_macro,all": 0.6203101949624122, 
"acc,all": 0.6597014925373135, - "main_score": 0.46523264622180927 + "main_score": 0.6203101949624122 } }, "config_tasks": { diff --git a/freewheelin/free-solar-evo-v0.11/raw_2024-08-09T06-28-11.333017/results.json b/freewheelin/free-solar-evo-v0.11/raw_2024-08-09T06-28-11.333017/results.json index 2d9beb4570b3ff6e3cb1e28209439657a40b9ca8..0d2080075114a4131073b0d882f34d237ff791bd 100644 --- a/freewheelin/free-solar-evo-v0.11/raw_2024-08-09T06-28-11.333017/results.json +++ b/freewheelin/free-solar-evo-v0.11/raw_2024-08-09T06-28-11.333017/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.43288217311237, - "acc,all": 0.5367647058823529, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.6657294593588593, - "mse,all": 1.0807476840465797, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.545201668984701, - "acc,exam_id__USP_2021": 0.5961538461538461, - "acc,exam_id__USP_2019": 0.375, - "acc,exam_id__USP_2022": 0.46938775510204084, - "acc,exam_id__USP_2024": 0.7804878048780488, - "acc,exam_id__UNICAMP_2019": 0.5, - "acc,exam_id__USP_2018": 0.4074074074074074, - "acc,exam_id__UNICAMP_2024": 0.4444444444444444, - "acc,exam_id__UNICAMP_2023": 0.6744186046511628, - "acc,exam_id__USP_2023": 0.6590909090909091, - "acc,exam_id__UNICAMP_2020": 0.5272727272727272, - "acc,exam_id__UNICAMP_2018": 0.5370370370370371, - "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, - "acc,exam_id__USP_2020": 0.5178571428571429, - "acc,exam_id__UNICAMP_2022": 0.6666666666666666, - "acc,exam_id__UNICAMP_2021_2": 0.6078431372549019, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6277116864940517, - "acc,exam_id__2016_2": 0.6097560975609756, - "acc,exam_id__2015": 0.6722689075630253, - "acc,exam_id__2011": 0.717948717948718, - "acc,exam_id__2022": 0.6015037593984962, - "acc,exam_id__2013": 0.6018518518518519, - "acc,exam_id__2016": 0.5454545454545454, - "acc,exam_id__2014": 0.5871559633027523, - "acc,exam_id__2017": 0.6293103448275862, - "acc,exam_id__2009": 0.6434782608695652, - "acc,exam_id__2010": 0.6495726495726496, - "acc,exam_id__2012": 0.646551724137931, - "acc,exam_id__2023": 0.6296296296296297 - }, - "faquad_nli": { - "f1_macro,all": 0.26616161616161615, - "acc,all": 0.2923076923076923, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.21032477797453067, - "acc,all": 0.2007142857142857 - }, - "oab_exams": { - "acc,all": 0.47517084282460137, - "acc,exam_id__2015-17": 0.6025641025641025, - "acc,exam_id__2013-12": 0.425, - "acc,exam_id__2015-18": 0.525, - "acc,exam_id__2011-04": 0.45, - "acc,exam_id__2016-21": 0.4625, - "acc,exam_id__2012-06": 0.4875, - "acc,exam_id__2011-03": 0.5252525252525253, - "acc,exam_id__2016-19": 0.4358974358974359, - "acc,exam_id__2012-07": 0.4625, - "acc,exam_id__2012-06a": 0.45, - "acc,exam_id__2017-23": 0.4625, - "acc,exam_id__2015-16": 0.4, - "acc,exam_id__2014-13": 0.425, - "acc,exam_id__2012-09": 0.4155844155844156, - "acc,exam_id__2016-20a": 0.4375, - "acc,exam_id__2017-22": 0.45, - "acc,exam_id__2017-24": 0.4875, - "acc,exam_id__2014-15": 0.48717948717948717, - "acc,exam_id__2018-25": 0.5, - "acc,exam_id__2016-20": 0.4875, - "acc,exam_id__2012-08": 0.5, - "acc,exam_id__2013-10": 0.5, - "acc,exam_id__2013-11": 0.5, - "acc,exam_id__2011-05": 0.45, - "acc,exam_id__2010-02": 0.57, - "acc,exam_id__2010-01": 0.3764705882352941, - "acc,exam_id__2014-14": 0.525, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": 
"portuguese_hate_speech_binary", - "f1_macro,all": 0.2899387716905965, - "acc,all": 0.27144535840188017 - }, - "tweetsentbr": { - "f1_macro,all": 0.47397909196553545, - "acc,all": 0.6681592039800995, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.649323259668555, + "acc,all": 0.5367647058823529, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.6657294593588593, + "mse,all": 1.0807476840465797, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.545201668984701, + "acc,exam_id__USP_2021": 0.5961538461538461, + "acc,exam_id__USP_2019": 0.375, + "acc,exam_id__USP_2022": 0.46938775510204084, + "acc,exam_id__USP_2024": 0.7804878048780488, + "acc,exam_id__UNICAMP_2019": 0.5, + "acc,exam_id__USP_2018": 0.4074074074074074, + "acc,exam_id__UNICAMP_2024": 0.4444444444444444, + "acc,exam_id__UNICAMP_2023": 0.6744186046511628, + "acc,exam_id__USP_2023": 0.6590909090909091, + "acc,exam_id__UNICAMP_2020": 0.5272727272727272, + "acc,exam_id__UNICAMP_2018": 0.5370370370370371, + "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, + "acc,exam_id__USP_2020": 0.5178571428571429, + "acc,exam_id__UNICAMP_2022": 0.6666666666666666, + "acc,exam_id__UNICAMP_2021_2": 0.6078431372549019, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6277116864940517, + "acc,exam_id__2016_2": 0.6097560975609756, + "acc,exam_id__2015": 0.6722689075630253, + "acc,exam_id__2011": 0.717948717948718, + "acc,exam_id__2022": 0.6015037593984962, + "acc,exam_id__2013": 0.6018518518518519, + "acc,exam_id__2016": 0.5454545454545454, + "acc,exam_id__2014": 0.5871559633027523, + "acc,exam_id__2017": 0.6293103448275862, + "acc,exam_id__2009": 0.6434782608695652, + "acc,exam_id__2010": 0.6495726495726496, + "acc,exam_id__2012": 0.646551724137931, + "acc,exam_id__2023": 0.6296296296296297 + }, + "faquad_nli": { + "f1_macro,all": 0.39924242424242423, + "acc,all": 0.2923076923076923, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.315487166961796, + "acc,all": 0.2007142857142857 + }, + "oab_exams": { + "acc,all": 0.47517084282460137, + "acc,exam_id__2015-17": 0.6025641025641025, + "acc,exam_id__2013-12": 0.425, + 
"acc,exam_id__2015-18": 0.525, + "acc,exam_id__2011-04": 0.45, + "acc,exam_id__2016-21": 0.4625, + "acc,exam_id__2012-06": 0.4875, + "acc,exam_id__2011-03": 0.5252525252525253, + "acc,exam_id__2016-19": 0.4358974358974359, + "acc,exam_id__2012-07": 0.4625, + "acc,exam_id__2012-06a": 0.45, + "acc,exam_id__2017-23": 0.4625, + "acc,exam_id__2015-16": 0.4, + "acc,exam_id__2014-13": 0.425, + "acc,exam_id__2012-09": 0.4155844155844156, + "acc,exam_id__2016-20a": 0.4375, + "acc,exam_id__2017-22": 0.45, + "acc,exam_id__2017-24": 0.4875, + "acc,exam_id__2014-15": 0.48717948717948717, + "acc,exam_id__2018-25": 0.5, + "acc,exam_id__2016-20": 0.4875, + "acc,exam_id__2012-08": 0.5, + "acc,exam_id__2013-10": 0.5, + "acc,exam_id__2013-11": 0.5, + "acc,exam_id__2011-05": 0.45, + "acc,exam_id__2010-02": 0.57, + "acc,exam_id__2010-01": 0.3764705882352941, + "acc,exam_id__2014-14": 0.525, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.43490815753589473, + "acc,all": 0.27144535840188017 + }, + "tweetsentbr": { + "f1_macro,all": 0.6319721226207139, + "acc,all": 0.6681592039800995, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "17fc24a557bd3c3836abc9f6a367c803cba0cccd", - "model_dtype": "torch.float16", - "model_memory_footprint": 21463060736, - "model_num_parameters": 10731524096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 1620.039188243527, 
- "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "17fc24a557bd3c3836abc9f6a367c803cba0cccd", + "model_dtype": "torch.float16", + "model_memory_footprint": 21463060736, + "model_num_parameters": 10731524096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=freewheelin/free-solar-evo-v0.11,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=freewheelin/free-solar-evo-v0.11,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": 
"5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/freewheelin/free-solar-evo-v0.11/results_2024-08-09T06-28-11.333017.json b/freewheelin/free-solar-evo-v0.11/results_2024-08-09T06-28-11.333017.json index a486ea28fd5227e1841bc084bfeb68e84e7cb54f..9ca24442b6962e9316c287d09266bd062beecf28 100644 --- a/freewheelin/free-solar-evo-v0.11/results_2024-08-09T06-28-11.333017.json +++ b/freewheelin/free-solar-evo-v0.11/results_2024-08-09T06-28-11.333017.json @@ -34,29 +34,29 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.44301112095187356, - "all_grouped_npm": 0.07839510465613597, + "all_grouped_average": 0.5271940876323997, + "all_grouped_npm": 0.23408417645238902, "all_grouped": { "enem_challenge": 0.6277116864940517, "bluex": 0.545201668984701, "oab_exams": 0.47517084282460137, - "assin2_rte": 0.43288217311237, + "assin2_rte": 0.649323259668555, "assin2_sts": 0.6657294593588593, - "faquad_nli": 0.26616161616161615, - "hatebr_offensive": 0.21032477797453067, - "portuguese_hate_speech": 0.2899387716905965, - "tweetsentbr": 0.47397909196553545 + "faquad_nli": 0.39924242424242423, + "hatebr_offensive": 0.315487166961796, + "portuguese_hate_speech": 0.43490815753589473, + "tweetsentbr": 0.6319721226207139 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6277116864940517, "harness|bluex|bluex|None|3": 0.545201668984701, "harness|oab_exams|oab_exams|None|3": 0.47517084282460137, - "harness|assin2_rte|assin2_rte|None|15": 0.43288217311237, + "harness|assin2_rte|assin2_rte|None|15": 0.649323259668555, "harness|assin2_sts|assin2_sts|None|15": 0.6657294593588593, - "harness|faquad_nli|faquad_nli|None|15": 0.26616161616161615, - "harness|hatebr_offensive|hatebr_offensive|None|25": 0.21032477797453067, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.2899387716905965, - "harness|tweetsentbr|tweetsentbr|None|25": 0.47397909196553545 + "harness|faquad_nli|faquad_nli|None|15": 0.39924242424242423, + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.315487166961796, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.43490815753589473, + "harness|tweetsentbr|tweetsentbr|None|25": 0.6319721226207139 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6277116864940517, @@ -125,9 +125,9 @@ "main_score": 0.47517084282460137 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.43288217311237, + "f1_macro,all": 0.649323259668555, "acc,all": 0.5367647058823529, - "main_score": 0.43288217311237 + "main_score": 0.649323259668555 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.6657294593588593, @@ -135,24 +135,24 @@ "main_score": 0.6657294593588593 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.26616161616161615, + "f1_macro,all": 0.39924242424242423, "acc,all": 0.2923076923076923, - "main_score": 0.26616161616161615 + "main_score": 0.39924242424242423 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.21032477797453067, + "f1_macro,all": 0.315487166961796, "acc,all": 0.2007142857142857, - "main_score": 0.21032477797453067 + "main_score": 0.315487166961796 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.2899387716905965, + "f1_macro,all": 0.43490815753589473, "acc,all": 0.27144535840188017, - "main_score": 0.2899387716905965 + "main_score": 0.43490815753589473 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.47397909196553545, + "f1_macro,all": 0.6319721226207139, "acc,all": 
0.6681592039800995, - "main_score": 0.47397909196553545 + "main_score": 0.6319721226207139 } }, "config_tasks": { diff --git a/freewheelin/free-solar-evo-v0.13/raw_2024-08-12T03-39-46.797616/results.json b/freewheelin/free-solar-evo-v0.13/raw_2024-08-12T03-39-46.797616/results.json index 938787eb3829f3fe847f6361b7cebb90a4c428ca..33a4371e862c73ca9d0cb9571ba9ae1d9e125f91 100644 --- a/freewheelin/free-solar-evo-v0.13/raw_2024-08-12T03-39-46.797616/results.json +++ b/freewheelin/free-solar-evo-v0.13/raw_2024-08-12T03-39-46.797616/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.45551099108603826, - "acc,all": 0.5755718954248366, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.6703707809587911, - "mse,all": 1.0351343349130195, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5465924895688457, - "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, - "acc,exam_id__UNICAMP_2020": 0.5272727272727272, - "acc,exam_id__UNICAMP_2023": 0.6744186046511628, - "acc,exam_id__USP_2023": 0.6590909090909091, - "acc,exam_id__UNICAMP_2018": 0.5370370370370371, - "acc,exam_id__UNICAMP_2022": 0.6666666666666666, - "acc,exam_id__UNICAMP_2021_2": 0.5882352941176471, - "acc,exam_id__UNICAMP_2024": 0.4666666666666667, - "acc,exam_id__USP_2021": 0.5961538461538461, - "acc,exam_id__USP_2018": 0.4074074074074074, - "acc,exam_id__USP_2020": 0.5178571428571429, - "acc,exam_id__USP_2024": 0.7804878048780488, - "acc,exam_id__USP_2022": 0.46938775510204084, - "acc,exam_id__USP_2019": 0.375, - "acc,exam_id__UNICAMP_2019": 0.52, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6277116864940517, - "acc,exam_id__2009": 0.6434782608695652, - "acc,exam_id__2012": 0.6551724137931034, - "acc,exam_id__2010": 0.6495726495726496, - "acc,exam_id__2022": 0.6090225563909775, - "acc,exam_id__2013": 0.6018518518518519, - "acc,exam_id__2011": 0.7264957264957265, - "acc,exam_id__2015": 0.6638655462184874, - "acc,exam_id__2016_2": 0.5934959349593496, - "acc,exam_id__2017": 0.6293103448275862, - "acc,exam_id__2014": 0.5871559633027523, - "acc,exam_id__2016": 0.5537190082644629, - "acc,exam_id__2023": 0.6222222222222222 - }, - "faquad_nli": { - "f1_macro,all": 0.3025882081676073, - "acc,all": 0.35846153846153844, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.271086821743536, - "acc,all": 0.2714285714285714 - }, - "oab_exams": { - "acc,all": 0.4788154897494305, - "acc,exam_id__2015-17": 0.6025641025641025, - "acc,exam_id__2013-10": 0.5, - "acc,exam_id__2018-25": 0.5, - "acc,exam_id__2012-06": 0.5, - "acc,exam_id__2012-07": 0.4625, - "acc,exam_id__2013-12": 0.45, - "acc,exam_id__2011-03": 0.5252525252525253, - "acc,exam_id__2014-14": 0.5125, - "acc,exam_id__2013-11": 0.5125, - "acc,exam_id__2011-04": 0.475, - "acc,exam_id__2011-05": 0.475, - "acc,exam_id__2017-23": 0.475, - "acc,exam_id__2014-13": 0.4375, - "acc,exam_id__2016-19": 0.4358974358974359, - "acc,exam_id__2010-01": 0.3764705882352941, - "acc,exam_id__2017-24": 0.475, - "acc,exam_id__2017-22": 0.45, - "acc,exam_id__2014-15": 0.48717948717948717, - "acc,exam_id__2010-02": 0.55, - "acc,exam_id__2016-21": 0.4625, - "acc,exam_id__2016-20": 0.4875, - "acc,exam_id__2015-16": 0.3875, - "acc,exam_id__2016-20a": 0.45, - "acc,exam_id__2012-08": 0.5, - "acc,exam_id__2012-09": 0.4155844155844156, - "acc,exam_id__2012-06a": 0.45, - "acc,exam_id__2015-18": 0.55, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": 
"portuguese_hate_speech_binary", - "f1_macro,all": 0.3291242937853107, - "acc,all": 0.3407755581668625 - }, - "tweetsentbr": { - "f1_macro,all": 0.4739150441119442, - "acc,all": 0.6686567164179105, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.6832664866290573, + "acc,all": 0.5755718954248366, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.6703707809587911, + "mse,all": 1.0351343349130195, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5465924895688457, + "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, + "acc,exam_id__UNICAMP_2020": 0.5272727272727272, + "acc,exam_id__UNICAMP_2023": 0.6744186046511628, + "acc,exam_id__USP_2023": 0.6590909090909091, + "acc,exam_id__UNICAMP_2018": 0.5370370370370371, + "acc,exam_id__UNICAMP_2022": 0.6666666666666666, + "acc,exam_id__UNICAMP_2021_2": 0.5882352941176471, + "acc,exam_id__UNICAMP_2024": 0.4666666666666667, + "acc,exam_id__USP_2021": 0.5961538461538461, + "acc,exam_id__USP_2018": 0.4074074074074074, + "acc,exam_id__USP_2020": 0.5178571428571429, + "acc,exam_id__USP_2024": 0.7804878048780488, + "acc,exam_id__USP_2022": 0.46938775510204084, + "acc,exam_id__USP_2019": 0.375, + "acc,exam_id__UNICAMP_2019": 0.52, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6277116864940517, + "acc,exam_id__2009": 0.6434782608695652, + "acc,exam_id__2012": 0.6551724137931034, + "acc,exam_id__2010": 0.6495726495726496, + "acc,exam_id__2022": 0.6090225563909775, + "acc,exam_id__2013": 0.6018518518518519, + "acc,exam_id__2011": 0.7264957264957265, + "acc,exam_id__2015": 0.6638655462184874, + "acc,exam_id__2016_2": 0.5934959349593496, + "acc,exam_id__2017": 0.6293103448275862, + "acc,exam_id__2014": 0.5871559633027523, + "acc,exam_id__2016": 0.5537190082644629, + "acc,exam_id__2023": 0.6222222222222222 + }, + "faquad_nli": { + "f1_macro,all": 0.4538823122514109, + "acc,all": 0.35846153846153844, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.40663023261530395, + "acc,all": 0.2714285714285714 + }, + "oab_exams": { + "acc,all": 0.4788154897494305, + "acc,exam_id__2015-17": 0.6025641025641025, + "acc,exam_id__2013-10": 0.5, + 
"acc,exam_id__2018-25": 0.5, + "acc,exam_id__2012-06": 0.5, + "acc,exam_id__2012-07": 0.4625, + "acc,exam_id__2013-12": 0.45, + "acc,exam_id__2011-03": 0.5252525252525253, + "acc,exam_id__2014-14": 0.5125, + "acc,exam_id__2013-11": 0.5125, + "acc,exam_id__2011-04": 0.475, + "acc,exam_id__2011-05": 0.475, + "acc,exam_id__2017-23": 0.475, + "acc,exam_id__2014-13": 0.4375, + "acc,exam_id__2016-19": 0.4358974358974359, + "acc,exam_id__2010-01": 0.3764705882352941, + "acc,exam_id__2017-24": 0.475, + "acc,exam_id__2017-22": 0.45, + "acc,exam_id__2014-15": 0.48717948717948717, + "acc,exam_id__2010-02": 0.55, + "acc,exam_id__2016-21": 0.4625, + "acc,exam_id__2016-20": 0.4875, + "acc,exam_id__2015-16": 0.3875, + "acc,exam_id__2016-20a": 0.45, + "acc,exam_id__2012-08": 0.5, + "acc,exam_id__2012-09": 0.4155844155844156, + "acc,exam_id__2012-06a": 0.45, + "acc,exam_id__2015-18": 0.55, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.4936864406779661, + "acc,all": 0.3407755581668625 + }, + "tweetsentbr": { + "f1_macro,all": 0.6318867254825923, + "acc,all": 0.6686567164179105, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "2a7eb72f84c54898630f9db470eee0f936a64396", - "model_dtype": "torch.float16", - "model_memory_footprint": 21463060736, - "model_num_parameters": 10731524096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 1620.039188243527, 
- "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "2a7eb72f84c54898630f9db470eee0f936a64396", + "model_dtype": "torch.float16", + "model_memory_footprint": 21463060736, + "model_num_parameters": 10731524096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=freewheelin/free-solar-evo-v0.13,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=freewheelin/free-solar-evo-v0.13,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": 
"5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/freewheelin/free-solar-evo-v0.13/results_2024-08-12T03-39-46.797616.json b/freewheelin/free-solar-evo-v0.13/results_2024-08-12T03-39-46.797616.json index b38249b76d263f96cc8128ec48c7b99151820f7f..63506ddfc0afc8aebab7750b3b7485a156193e1e 100644 --- a/freewheelin/free-solar-evo-v0.13/results_2024-08-12T03-39-46.797616.json +++ b/freewheelin/free-solar-evo-v0.13/results_2024-08-12T03-39-46.797616.json @@ -34,29 +34,29 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.46174620062950616, - "all_grouped_npm": 0.11396784397018361, + "all_grouped_average": 0.5547602938252721, + "all_grouped_npm": 0.2868175249640305, "all_grouped": { "enem_challenge": 0.6277116864940517, "bluex": 0.5465924895688457, "oab_exams": 0.4788154897494305, - "assin2_rte": 0.45551099108603826, + "assin2_rte": 0.6832664866290573, "assin2_sts": 0.6703707809587911, - "faquad_nli": 0.3025882081676073, - "hatebr_offensive": 0.271086821743536, - "portuguese_hate_speech": 0.3291242937853107, - "tweetsentbr": 0.4739150441119442 + "faquad_nli": 0.4538823122514109, + "hatebr_offensive": 0.40663023261530395, + "portuguese_hate_speech": 0.4936864406779661, + "tweetsentbr": 0.6318867254825923 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6277116864940517, "harness|bluex|bluex|None|3": 0.5465924895688457, "harness|oab_exams|oab_exams|None|3": 0.4788154897494305, - "harness|assin2_rte|assin2_rte|None|15": 0.45551099108603826, + "harness|assin2_rte|assin2_rte|None|15": 0.6832664866290573, "harness|assin2_sts|assin2_sts|None|15": 0.6703707809587911, - "harness|faquad_nli|faquad_nli|None|15": 0.3025882081676073, - "harness|hatebr_offensive|hatebr_offensive|None|25": 0.271086821743536, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.3291242937853107, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4739150441119442 + "harness|faquad_nli|faquad_nli|None|15": 0.4538823122514109, + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.40663023261530395, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.4936864406779661, + "harness|tweetsentbr|tweetsentbr|None|25": 0.6318867254825923 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6277116864940517, @@ -125,9 +125,9 @@ "main_score": 0.4788154897494305 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.45551099108603826, + "f1_macro,all": 0.6832664866290573, "acc,all": 0.5755718954248366, - "main_score": 0.45551099108603826 + "main_score": 0.6832664866290573 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.6703707809587911, @@ -135,24 +135,24 @@ "main_score": 0.6703707809587911 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.3025882081676073, + "f1_macro,all": 0.4538823122514109, "acc,all": 0.35846153846153844, - "main_score": 0.3025882081676073 + "main_score": 0.4538823122514109 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.271086821743536, + "f1_macro,all": 0.40663023261530395, "acc,all": 0.2714285714285714, - "main_score": 0.271086821743536 + "main_score": 0.40663023261530395 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.3291242937853107, + "f1_macro,all": 0.4936864406779661, "acc,all": 0.3407755581668625, - "main_score": 0.3291242937853107 + "main_score": 0.4936864406779661 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4739150441119442, + "f1_macro,all": 0.6318867254825923, "acc,all": 
0.6686567164179105, - "main_score": 0.4739150441119442 + "main_score": 0.6318867254825923 } }, "config_tasks": { diff --git a/ghost-x/ghost-8b-beta-1608/raw_2024-08-25T01-39-27.386034/results.json b/ghost-x/ghost-8b-beta-1608/raw_2024-08-25T01-39-27.386034/results.json index 5e9cfd1ffde165c7f1392010df5c1f13f2d7c75c..13665fab5d579473e20c146c08a74e7aca349661 100644 --- a/ghost-x/ghost-8b-beta-1608/raw_2024-08-25T01-39-27.386034/results.json +++ b/ghost-x/ghost-8b-beta-1608/raw_2024-08-25T01-39-27.386034/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9065210692034569, - "acc,all": 0.9068627450980392, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.6934426233290423, - "mse,all": 0.7274305555555556, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.48678720445062584, - "acc,exam_id__USP_2023": 0.5909090909090909, - "acc,exam_id__USP_2024": 0.5609756097560976, - "acc,exam_id__USP_2018": 0.3888888888888889, - "acc,exam_id__USP_2021": 0.36538461538461536, - "acc,exam_id__UNICAMP_2021_1": 0.5, - "acc,exam_id__UNICAMP_2019": 0.56, - "acc,exam_id__UNICAMP_2021_2": 0.43137254901960786, - "acc,exam_id__USP_2020": 0.4642857142857143, - "acc,exam_id__UNICAMP_2020": 0.4727272727272727, - "acc,exam_id__UNICAMP_2022": 0.5384615384615384, - "acc,exam_id__UNICAMP_2024": 0.5777777777777777, - "acc,exam_id__USP_2019": 0.375, - "acc,exam_id__USP_2022": 0.4897959183673469, - "acc,exam_id__UNICAMP_2018": 0.46296296296296297, - "acc,exam_id__UNICAMP_2023": 0.5813953488372093, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.642407277816655, - "acc,exam_id__2012": 0.603448275862069, - "acc,exam_id__2011": 0.7008547008547008, - "acc,exam_id__2015": 0.6134453781512605, - "acc,exam_id__2017": 0.646551724137931, - "acc,exam_id__2010": 0.6068376068376068, - "acc,exam_id__2023": 0.6962962962962963, - "acc,exam_id__2016": 0.6198347107438017, - "acc,exam_id__2014": 0.6422018348623854, - "acc,exam_id__2013": 0.6574074074074074, - "acc,exam_id__2016_2": 0.6504065040650406, - "acc,exam_id__2022": 0.6541353383458647, - "acc,exam_id__2009": 0.6086956521739131 - }, - "faquad_nli": { - "f1_macro,all": 0.363744357329532, - "acc,all": 0.36615384615384616, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8457130262287855, - "acc,all": 0.8457142857142858 - }, - "oab_exams": { - "acc,all": 0.42779043280182233, - "acc,exam_id__2016-19": 0.5, - "acc,exam_id__2012-08": 0.4, - "acc,exam_id__2015-17": 0.6153846153846154, - "acc,exam_id__2010-01": 0.3764705882352941, - "acc,exam_id__2012-06a": 0.475, - "acc,exam_id__2014-13": 0.4125, - "acc,exam_id__2015-18": 0.3875, - "acc,exam_id__2017-24": 0.3625, - "acc,exam_id__2016-20a": 0.375, - "acc,exam_id__2018-25": 0.4125, - "acc,exam_id__2017-22": 0.6, - "acc,exam_id__2012-09": 0.37662337662337664, - "acc,exam_id__2010-02": 0.36, - "acc,exam_id__2016-20": 0.425, - "acc,exam_id__2011-05": 0.4, - "acc,exam_id__2011-03": 0.35353535353535354, - "acc,exam_id__2013-11": 0.5375, - "acc,exam_id__2011-04": 0.3625, - "acc,exam_id__2017-23": 0.4, - "acc,exam_id__2016-21": 0.4125, - "acc,exam_id__2012-07": 0.4, - "acc,exam_id__2015-16": 0.3625, - "acc,exam_id__2013-10": 0.3875, - "acc,exam_id__2014-15": 0.5128205128205128, - "acc,exam_id__2014-14": 0.4875, - "acc,exam_id__2013-12": 0.425, - "acc,exam_id__2012-06": 0.475, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 
0.583756985233497, - "acc,all": 0.5851938895417156 - }, - "tweetsentbr": { - "f1_macro,all": 0.5376464442224734, - "acc,all": 0.7298507462686568, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9065210692034569, + "acc,all": 0.9068627450980392, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.6934426233290423, + "mse,all": 0.7274305555555556, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.48678720445062584, + "acc,exam_id__USP_2023": 0.5909090909090909, + "acc,exam_id__USP_2024": 0.5609756097560976, + "acc,exam_id__USP_2018": 0.3888888888888889, + "acc,exam_id__USP_2021": 0.36538461538461536, + "acc,exam_id__UNICAMP_2021_1": 0.5, + "acc,exam_id__UNICAMP_2019": 0.56, + "acc,exam_id__UNICAMP_2021_2": 0.43137254901960786, + "acc,exam_id__USP_2020": 0.4642857142857143, + "acc,exam_id__UNICAMP_2020": 0.4727272727272727, + "acc,exam_id__UNICAMP_2022": 0.5384615384615384, + "acc,exam_id__UNICAMP_2024": 0.5777777777777777, + "acc,exam_id__USP_2019": 0.375, + "acc,exam_id__USP_2022": 0.4897959183673469, + "acc,exam_id__UNICAMP_2018": 0.46296296296296297, + "acc,exam_id__UNICAMP_2023": 0.5813953488372093, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.642407277816655, + "acc,exam_id__2012": 0.603448275862069, + "acc,exam_id__2011": 0.7008547008547008, + "acc,exam_id__2015": 0.6134453781512605, + "acc,exam_id__2017": 0.646551724137931, + "acc,exam_id__2010": 0.6068376068376068, + "acc,exam_id__2023": 0.6962962962962963, + "acc,exam_id__2016": 0.6198347107438017, + "acc,exam_id__2014": 0.6422018348623854, + "acc,exam_id__2013": 0.6574074074074074, + "acc,exam_id__2016_2": 0.6504065040650406, + "acc,exam_id__2022": 0.6541353383458647, + "acc,exam_id__2009": 0.6086956521739131 + }, + "faquad_nli": { + "f1_macro,all": 0.363744357329532, + "acc,all": 0.36615384615384616, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8457130262287855, + "acc,all": 0.8457142857142858 + }, + "oab_exams": { + "acc,all": 0.42779043280182233, + "acc,exam_id__2016-19": 0.5, + "acc,exam_id__2012-08": 0.4, + "acc,exam_id__2015-17": 0.6153846153846154, + "acc,exam_id__2010-01": 
0.3764705882352941, + "acc,exam_id__2012-06a": 0.475, + "acc,exam_id__2014-13": 0.4125, + "acc,exam_id__2015-18": 0.3875, + "acc,exam_id__2017-24": 0.3625, + "acc,exam_id__2016-20a": 0.375, + "acc,exam_id__2018-25": 0.4125, + "acc,exam_id__2017-22": 0.6, + "acc,exam_id__2012-09": 0.37662337662337664, + "acc,exam_id__2010-02": 0.36, + "acc,exam_id__2016-20": 0.425, + "acc,exam_id__2011-05": 0.4, + "acc,exam_id__2011-03": 0.35353535353535354, + "acc,exam_id__2013-11": 0.5375, + "acc,exam_id__2011-04": 0.3625, + "acc,exam_id__2017-23": 0.4, + "acc,exam_id__2016-21": 0.4125, + "acc,exam_id__2012-07": 0.4, + "acc,exam_id__2015-16": 0.3625, + "acc,exam_id__2013-10": 0.3875, + "acc,exam_id__2014-15": 0.5128205128205128, + "acc,exam_id__2014-14": 0.4875, + "acc,exam_id__2013-12": 0.425, + "acc,exam_id__2012-06": 0.475, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.583756985233497, + "acc,all": 0.5851938895417156 + }, + "tweetsentbr": { + "f1_macro,all": 0.7168619256299648, + "acc,all": 0.7298507462686568, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "6d1b3853aab774af5a4db21ff9d5764918fb48f5", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 16060530944, - "model_num_parameters": 8030261248, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1350.5322712418301, - "min_seq_length": 1331, - "max_seq_length": 1414, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1541.5322712418301, - "min_seq_length": 1522, - "max_seq_length": 1605, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1492.7719054242002, - "min_seq_length": 1173, - "max_seq_length": 2142, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1420.3547935619315, - "min_seq_length": 1195, - "max_seq_length": 2348, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1479.8215384615385, - "min_seq_length": 1434, - "max_seq_length": 1576, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "6d1b3853aab774af5a4db21ff9d5764918fb48f5", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 16060530944, + "model_num_parameters": 8030261248, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1331.3878571428572, - "min_seq_length": 1311, - "max_seq_length": 1550, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1228.3772209567198, - "min_seq_length": 996, - "max_seq_length": 1662, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1350.5322712418301, + "min_seq_length": 1331, + "max_seq_length": 1414, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1541.5322712418301, + "min_seq_length": 1522, + "max_seq_length": 1605, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1492.7719054242002, + "min_seq_length": 1173, + "max_seq_length": 2142, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1420.3547935619315, + "min_seq_length": 1195, + "max_seq_length": 2348, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1479.8215384615385, + "min_seq_length": 1434, + "max_seq_length": 1576, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1331.3878571428572, + "min_seq_length": 1311, + "max_seq_length": 1550, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1228.3772209567198, + "min_seq_length": 996, + "max_seq_length": 1662, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1728.4195064629848, + "min_seq_length": 1698, + "max_seq_length": 1760, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1589.1537313432837, + "min_seq_length": 1572, + "max_seq_length": 1637, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1728.4195064629848, - "min_seq_length": 1698, - "max_seq_length": 1760, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=ghost-x/ghost-8b-beta-1608,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1589.1537313432837, - "min_seq_length": 1572, - "max_seq_length": 1637, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=ghost-x/ghost-8b-beta-1608,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": 
"5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/ghost-x/ghost-8b-beta-1608/results_2024-08-25T01-39-27.386034.json b/ghost-x/ghost-8b-beta-1608/results_2024-08-25T01-39-27.386034.json index 6c162305dde473516b9edeedcd8d23e15b944973..64a41c038c0a8afcd4a5e97b2c9bc545e4efa2d7 100644 --- a/ghost-x/ghost-8b-beta-1608/results_2024-08-25T01-39-27.386034.json +++ b/ghost-x/ghost-8b-beta-1608/results_2024-08-25T01-39-27.386034.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6097566022906544, - "all_grouped_npm": 0.40769101439246114, + "all_grouped_average": 0.6296694335581534, + "all_grouped_npm": 0.4373232037786205, "all_grouped": { "enem_challenge": 0.642407277816655, "bluex": 0.48678720445062584, @@ -45,7 +45,7 @@ "faquad_nli": 0.363744357329532, "hatebr_offensive": 0.8457130262287855, "portuguese_hate_speech": 0.583756985233497, - "tweetsentbr": 0.5376464442224734 + "tweetsentbr": 0.7168619256299648 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.642407277816655, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.363744357329532, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8457130262287855, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.583756985233497, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5376464442224734 + "harness|tweetsentbr|tweetsentbr|None|25": 0.7168619256299648 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.642407277816655, @@ -150,9 +150,9 @@ "main_score": 0.583756985233497 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5376464442224734, + "f1_macro,all": 0.7168619256299648, "acc,all": 0.7298507462686568, - "main_score": 0.5376464442224734 + "main_score": 0.7168619256299648 } }, "config_tasks": { diff --git a/ghost-x/ghost-8b-beta/raw_2024-07-24T08-26-58.470678/results.json b/ghost-x/ghost-8b-beta/raw_2024-07-24T08-26-58.470678/results.json index 1101ee16fcd74e1861d7a2c81c8be8ffc4d706a3..19eb3bf9c4df58e07c84e0f74de35c781338c0b2 100644 --- a/ghost-x/ghost-8b-beta/raw_2024-07-24T08-26-58.470678/results.json +++ b/ghost-x/ghost-8b-beta/raw_2024-07-24T08-26-58.470678/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9157033248081841, - "acc,all": 0.9158496732026143, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.6786078768211791, - "mse,all": 0.8034763071895423, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.3769123783031989, - "acc,exam_id__USP_2018": 0.3148148148148148, - "acc,exam_id__USP_2021": 0.3269230769230769, - "acc,exam_id__UNICAMP_2018": 0.2962962962962963, - "acc,exam_id__UNICAMP_2020": 0.38181818181818183, - "acc,exam_id__UNICAMP_2022": 0.38461538461538464, - "acc,exam_id__UNICAMP_2024": 0.37777777777777777, - "acc,exam_id__USP_2019": 0.45, - "acc,exam_id__USP_2022": 0.3877551020408163, - "acc,exam_id__USP_2023": 0.4090909090909091, - "acc,exam_id__UNICAMP_2021_2": 0.3333333333333333, - "acc,exam_id__UNICAMP_2023": 0.46511627906976744, - "acc,exam_id__USP_2024": 0.3902439024390244, - "acc,exam_id__USP_2020": 0.42857142857142855, - "acc,exam_id__UNICAMP_2021_1": 0.41304347826086957, - "acc,exam_id__UNICAMP_2019": 0.34, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.5745276417074877, - "acc,exam_id__2014": 0.6238532110091743, - "acc,exam_id__2015": 0.5462184873949579, - "acc,exam_id__2013": 0.5833333333333334, - "acc,exam_id__2009": 0.5304347826086957, - "acc,exam_id__2022": 0.6090225563909775, - 
"acc,exam_id__2017": 0.603448275862069, - "acc,exam_id__2010": 0.5213675213675214, - "acc,exam_id__2012": 0.4827586206896552, - "acc,exam_id__2016": 0.5702479338842975, - "acc,exam_id__2011": 0.6324786324786325, - "acc,exam_id__2016_2": 0.5772357723577236, - "acc,exam_id__2023": 0.6074074074074074 - }, - "faquad_nli": { - "f1_macro,all": 0.608080080260396, - "acc,all": 0.6292307692307693, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8476219675775112, - "acc,all": 0.8478571428571429 - }, - "oab_exams": { - "acc,all": 0.4041002277904328, - "acc,exam_id__2016-21": 0.3625, - "acc,exam_id__2018-25": 0.3875, - "acc,exam_id__2017-22": 0.475, - "acc,exam_id__2012-08": 0.3, - "acc,exam_id__2017-23": 0.3125, - "acc,exam_id__2013-12": 0.425, - "acc,exam_id__2011-05": 0.4375, - "acc,exam_id__2016-20": 0.475, - "acc,exam_id__2014-15": 0.5, - "acc,exam_id__2012-06a": 0.4875, - "acc,exam_id__2016-20a": 0.475, - "acc,exam_id__2014-13": 0.3625, - "acc,exam_id__2013-10": 0.4375, - "acc,exam_id__2012-09": 0.3116883116883117, - "acc,exam_id__2015-16": 0.4, - "acc,exam_id__2011-03": 0.36363636363636365, - "acc,exam_id__2013-11": 0.4625, - "acc,exam_id__2016-19": 0.4230769230769231, - "acc,exam_id__2014-14": 0.425, - "acc,exam_id__2010-01": 0.3176470588235294, - "acc,exam_id__2012-07": 0.3625, - "acc,exam_id__2017-24": 0.4125, - "acc,exam_id__2015-18": 0.425, - "acc,exam_id__2011-04": 0.3625, - "acc,exam_id__2012-06": 0.3875, - "acc,exam_id__2010-02": 0.33, - "acc,exam_id__2015-17": 0.5256410256410257, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6030103729615091, - "acc,all": 0.6063454759106933 - }, - "tweetsentbr": { - "f1_macro,all": 0.5408689205369703, - "acc,all": 0.7328358208955223, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9157033248081841, + "acc,all": 0.9158496732026143, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.6786078768211791, + "mse,all": 0.8034763071895423, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.3769123783031989, + "acc,exam_id__USP_2018": 0.3148148148148148, + "acc,exam_id__USP_2021": 0.3269230769230769, + "acc,exam_id__UNICAMP_2018": 0.2962962962962963, + "acc,exam_id__UNICAMP_2020": 0.38181818181818183, + "acc,exam_id__UNICAMP_2022": 0.38461538461538464, + "acc,exam_id__UNICAMP_2024": 0.37777777777777777, + "acc,exam_id__USP_2019": 0.45, + "acc,exam_id__USP_2022": 0.3877551020408163, + "acc,exam_id__USP_2023": 0.4090909090909091, + "acc,exam_id__UNICAMP_2021_2": 0.3333333333333333, + "acc,exam_id__UNICAMP_2023": 0.46511627906976744, + "acc,exam_id__USP_2024": 0.3902439024390244, + "acc,exam_id__USP_2020": 0.42857142857142855, + "acc,exam_id__UNICAMP_2021_1": 0.41304347826086957, + "acc,exam_id__UNICAMP_2019": 0.34, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.5745276417074877, + "acc,exam_id__2014": 0.6238532110091743, + "acc,exam_id__2015": 0.5462184873949579, + "acc,exam_id__2013": 0.5833333333333334, + "acc,exam_id__2009": 0.5304347826086957, + "acc,exam_id__2022": 0.6090225563909775, + "acc,exam_id__2017": 0.603448275862069, + "acc,exam_id__2010": 0.5213675213675214, + "acc,exam_id__2012": 0.4827586206896552, + "acc,exam_id__2016": 0.5702479338842975, + "acc,exam_id__2011": 0.6324786324786325, + "acc,exam_id__2016_2": 0.5772357723577236, + "acc,exam_id__2023": 0.6074074074074074 + }, + "faquad_nli": { + "f1_macro,all": 0.608080080260396, + "acc,all": 0.6292307692307693, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8476219675775112, + "acc,all": 0.8478571428571429 + }, + "oab_exams": { + "acc,all": 0.4041002277904328, + "acc,exam_id__2016-21": 0.3625, + "acc,exam_id__2018-25": 0.3875, + "acc,exam_id__2017-22": 0.475, + "acc,exam_id__2012-08": 0.3, + "acc,exam_id__2017-23": 0.3125, + "acc,exam_id__2013-12": 0.425, + "acc,exam_id__2011-05": 0.4375, + "acc,exam_id__2016-20": 0.475, + "acc,exam_id__2014-15": 0.5, + "acc,exam_id__2012-06a": 0.4875, + "acc,exam_id__2016-20a": 0.475, + "acc,exam_id__2014-13": 0.3625, + "acc,exam_id__2013-10": 0.4375, + "acc,exam_id__2012-09": 0.3116883116883117, + "acc,exam_id__2015-16": 0.4, + "acc,exam_id__2011-03": 0.36363636363636365, + "acc,exam_id__2013-11": 0.4625, + "acc,exam_id__2016-19": 0.4230769230769231, + "acc,exam_id__2014-14": 0.425, + "acc,exam_id__2010-01": 0.3176470588235294, + "acc,exam_id__2012-07": 0.3625, + 
"acc,exam_id__2017-24": 0.4125, + "acc,exam_id__2015-18": 0.425, + "acc,exam_id__2011-04": 0.3625, + "acc,exam_id__2012-06": 0.3875, + "acc,exam_id__2010-02": 0.33, + "acc,exam_id__2015-17": 0.5256410256410257, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6030103729615091, + "acc,all": 0.6063454759106933 + }, + "tweetsentbr": { + "f1_macro,all": 0.7211585607159606, + "acc,all": 0.7328358208955223, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "7bc85f19e9f1bcb99d3fbe81e1be7618fe9e77f1", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 16060530944, - "model_num_parameters": 8030261248, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 1, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1350.5322712418301, - "min_seq_length": 1331, - "max_seq_length": 1414, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1541.5322712418301, - "min_seq_length": 1522, - "max_seq_length": 1605, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1492.7719054242002, - "min_seq_length": 1173, - "max_seq_length": 2142, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1420.3547935619315, - "min_seq_length": 1195, - "max_seq_length": 2348, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1479.8215384615385, - "min_seq_length": 1434, - "max_seq_length": 1576, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "7bc85f19e9f1bcb99d3fbe81e1be7618fe9e77f1", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 16060530944, + "model_num_parameters": 8030261248, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 1, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1331.3878571428572, - "min_seq_length": 1311, - "max_seq_length": 1550, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1228.3772209567198, - "min_seq_length": 996, - "max_seq_length": 1662, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1350.5322712418301, + "min_seq_length": 1331, + "max_seq_length": 1414, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1541.5322712418301, + "min_seq_length": 1522, + "max_seq_length": 1605, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1492.7719054242002, + "min_seq_length": 1173, + "max_seq_length": 2142, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1420.3547935619315, + "min_seq_length": 1195, + "max_seq_length": 2348, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1479.8215384615385, + "min_seq_length": 1434, + "max_seq_length": 1576, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1331.3878571428572, + "min_seq_length": 1311, + "max_seq_length": 1550, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1228.3772209567198, + "min_seq_length": 996, + "max_seq_length": 1662, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1728.4195064629848, + "min_seq_length": 1698, + "max_seq_length": 1760, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1589.1537313432837, + "min_seq_length": 1572, + "max_seq_length": 1637, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1728.4195064629848, - "min_seq_length": 1698, - "max_seq_length": 1760, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=ghost-x/ghost-8b-beta,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1589.1537313432837, - "min_seq_length": 1572, - "max_seq_length": 1637, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=ghost-x/ghost-8b-beta,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": 
"5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/ghost-x/ghost-8b-beta/results_2024-07-24T08-26-58.470678.json b/ghost-x/ghost-8b-beta/results_2024-07-24T08-26-58.470678.json index fd96dc4a7a24f39bb8213621c8fade254c25aa6f..c8d6799602d9941ed05831f7288a2ce7514d1156 100644 --- a/ghost-x/ghost-8b-beta/results_2024-07-24T08-26-58.470678.json +++ b/ghost-x/ghost-8b-beta/results_2024-07-24T08-26-58.470678.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6166036434185409, - "all_grouped_npm": 0.4343614336290362, + "all_grouped_average": 0.636635825660651, + "all_grouped_npm": 0.46417122863217614, "all_grouped": { "enem_challenge": 0.5745276417074877, "bluex": 0.3769123783031989, @@ -45,7 +45,7 @@ "faquad_nli": 0.608080080260396, "hatebr_offensive": 0.8476219675775112, "portuguese_hate_speech": 0.6030103729615091, - "tweetsentbr": 0.5408689205369703 + "tweetsentbr": 0.7211585607159606 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.5745276417074877, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.608080080260396, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8476219675775112, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6030103729615091, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5408689205369703 + "harness|tweetsentbr|tweetsentbr|None|25": 0.7211585607159606 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.5745276417074877, @@ -150,9 +150,9 @@ "main_score": 0.6030103729615091 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5408689205369703, + "f1_macro,all": 0.7211585607159606, "acc,all": 0.7328358208955223, - "main_score": 0.5408689205369703 + "main_score": 0.7211585607159606 } }, "config_tasks": { diff --git a/google/mt5-base/raw_2024-04-18T06-10-32.403150/results.json b/google/mt5-base/raw_2024-04-18T06-10-32.403150/results.json index 94007246d3b0a26a8fe5d8ff58de4f3481110f7b..8a688664b4b7a607810bea80f20facffa5263483 100644 --- a/google/mt5-base/raw_2024-04-18T06-10-32.403150/results.json +++ b/google/mt5-base/raw_2024-04-18T06-10-32.403150/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.0005442176870748299, - "acc,all": 0.0004084967320261438, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.0435769262346769, - "mse,all": 7.174387254901961, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.10152990264255911, - "acc,exam_id__USP_2019": 0.25, - "acc,exam_id__UNICAMP_2018": 0.0, - "acc,exam_id__UNICAMP_2021_2": 0.0, - "acc,exam_id__UNICAMP_2021_1": 0.0, - "acc,exam_id__UNICAMP_2024": 0.0, - "acc,exam_id__USP_2020": 0.17857142857142858, - "acc,exam_id__USP_2022": 0.1836734693877551, - "acc,exam_id__USP_2021": 0.23076923076923078, - "acc,exam_id__USP_2024": 0.21951219512195122, - "acc,exam_id__UNICAMP_2022": 0.0, - "acc,exam_id__USP_2018": 0.24074074074074073, - "acc,exam_id__USP_2023": 0.1590909090909091, - "acc,exam_id__UNICAMP_2019": 0.04, - "acc,exam_id__UNICAMP_2020": 0.0, - "acc,exam_id__UNICAMP_2023": 0.023255813953488372, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.18964310706787962, - "acc,exam_id__2013": 0.18518518518518517, - "acc,exam_id__2015": 0.13445378151260504, - "acc,exam_id__2011": 0.23076923076923078, - "acc,exam_id__2023": 0.2074074074074074, - "acc,exam_id__2016_2": 0.17073170731707318, - "acc,exam_id__2022": 0.18045112781954886, - "acc,exam_id__2017": 0.1896551724137931, - "acc,exam_id__2010": 0.2222222222222222, 
- "acc,exam_id__2016": 0.19008264462809918, - "acc,exam_id__2009": 0.14782608695652175, - "acc,exam_id__2014": 0.1834862385321101, - "acc,exam_id__2012": 0.23275862068965517 - }, - "faquad_nli": { - "f1_macro,all": 0.22681159420289854, - "acc,all": 0.4815384615384615, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.004688232536333802, - "acc,all": 0.0035714285714285713 - }, - "oab_exams": { - "acc,all": 0.23189066059225513, - "acc,exam_id__2012-06": 0.2, - "acc,exam_id__2014-15": 0.23076923076923078, - "acc,exam_id__2011-05": 0.25, - "acc,exam_id__2012-09": 0.2077922077922078, - "acc,exam_id__2010-02": 0.21, - "acc,exam_id__2016-20": 0.275, - "acc,exam_id__2016-19": 0.20512820512820512, - "acc,exam_id__2015-16": 0.2375, - "acc,exam_id__2011-04": 0.25, - "acc,exam_id__2014-13": 0.275, - "acc,exam_id__2012-06a": 0.3, - "acc,exam_id__2015-18": 0.2875, - "acc,exam_id__2011-03": 0.1919191919191919, - "acc,exam_id__2012-07": 0.125, - "acc,exam_id__2015-17": 0.2948717948717949, - "acc,exam_id__2017-22": 0.25, - "acc,exam_id__2013-12": 0.1625, - "acc,exam_id__2012-08": 0.275, - "acc,exam_id__2010-01": 0.21176470588235294, - "acc,exam_id__2013-10": 0.2375, - "acc,exam_id__2016-21": 0.2125, - "acc,exam_id__2017-23": 0.1875, - "acc,exam_id__2017-24": 0.1875, - "acc,exam_id__2013-11": 0.2125, - "acc,exam_id__2016-20a": 0.275, - "acc,exam_id__2018-25": 0.2625, - "acc,exam_id__2014-14": 0.2625, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.0, - "acc,all": 0.0 - }, - "tweetsentbr": { - "f1_macro,all": 0.0, - "acc,all": 0.0, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.0008163265306122449, + "acc,all": 0.0004084967320261438, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.0435769262346769, + "mse,all": 7.174387254901961, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.10152990264255911, + "acc,exam_id__USP_2019": 0.25, + "acc,exam_id__UNICAMP_2018": 0.0, + "acc,exam_id__UNICAMP_2021_2": 0.0, + "acc,exam_id__UNICAMP_2021_1": 0.0, + "acc,exam_id__UNICAMP_2024": 0.0, + "acc,exam_id__USP_2020": 0.17857142857142858, + "acc,exam_id__USP_2022": 0.1836734693877551, + "acc,exam_id__USP_2021": 0.23076923076923078, + "acc,exam_id__USP_2024": 0.21951219512195122, + "acc,exam_id__UNICAMP_2022": 0.0, + "acc,exam_id__USP_2018": 0.24074074074074073, + "acc,exam_id__USP_2023": 0.1590909090909091, + "acc,exam_id__UNICAMP_2019": 0.04, + "acc,exam_id__UNICAMP_2020": 0.0, + "acc,exam_id__UNICAMP_2023": 0.023255813953488372, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.18964310706787962, + "acc,exam_id__2013": 0.18518518518518517, + "acc,exam_id__2015": 0.13445378151260504, + "acc,exam_id__2011": 0.23076923076923078, + "acc,exam_id__2023": 0.2074074074074074, + "acc,exam_id__2016_2": 0.17073170731707318, + "acc,exam_id__2022": 0.18045112781954886, + "acc,exam_id__2017": 0.1896551724137931, + "acc,exam_id__2010": 0.2222222222222222, + "acc,exam_id__2016": 0.19008264462809918, + "acc,exam_id__2009": 0.14782608695652175, + "acc,exam_id__2014": 0.1834862385321101, + "acc,exam_id__2012": 0.23275862068965517 + }, + "faquad_nli": { + "f1_macro,all": 0.34021739130434786, + "acc,all": 0.4815384615384615, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.007032348804500704, + "acc,all": 0.0035714285714285713 + }, + "oab_exams": { + "acc,all": 0.23189066059225513, + "acc,exam_id__2012-06": 0.2, + "acc,exam_id__2014-15": 0.23076923076923078, + "acc,exam_id__2011-05": 0.25, + "acc,exam_id__2012-09": 0.2077922077922078, + "acc,exam_id__2010-02": 0.21, + "acc,exam_id__2016-20": 0.275, + "acc,exam_id__2016-19": 0.20512820512820512, + "acc,exam_id__2015-16": 0.2375, + "acc,exam_id__2011-04": 0.25, + "acc,exam_id__2014-13": 0.275, + "acc,exam_id__2012-06a": 0.3, + "acc,exam_id__2015-18": 0.2875, + "acc,exam_id__2011-03": 0.1919191919191919, + "acc,exam_id__2012-07": 0.125, + "acc,exam_id__2015-17": 0.2948717948717949, + "acc,exam_id__2017-22": 0.25, + "acc,exam_id__2013-12": 0.1625, + "acc,exam_id__2012-08": 0.275, + "acc,exam_id__2010-01": 0.21176470588235294, + "acc,exam_id__2013-10": 0.2375, + "acc,exam_id__2016-21": 0.2125, + "acc,exam_id__2017-23": 0.1875, + "acc,exam_id__2017-24": 
0.1875, + "acc,exam_id__2013-11": 0.2125, + "acc,exam_id__2016-20a": 0.275, + "acc,exam_id__2018-25": 0.2625, + "acc,exam_id__2014-14": 0.2625, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.0, + "acc,all": 0.0 + }, + "tweetsentbr": { + "f1_macro,all": 0.0, + "acc,all": 0.0, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 3, - "non_truncated": 14147, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "2eb15465c5dd7f72a8f7984306ad05ebc3dd1e1f", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 1164802560, - "model_num_parameters": 582401280, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2048, - "max_ctx_length": 2048, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1119.2859477124182, - "min_seq_length": 1098, - "max_seq_length": 1188, - "max_ctx_length": 2048, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1203.2859477124182, - "min_seq_length": 1182, - "max_seq_length": 1272, - "max_ctx_length": 2048, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1350.0709318497914, - "min_seq_length": 1045, - "max_seq_length": 1953, - "max_ctx_length": 2048, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 3, - "non_truncated": 1426, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 4, - "mean_seq_length": 1387.624212736179, - 
"min_seq_length": 1144, - "max_seq_length": 2516, - "max_ctx_length": 2048, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972008397480754 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1401.1923076923076, - "min_seq_length": 1350, - "max_seq_length": 1499, - "max_ctx_length": 2048, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 3, + "non_truncated": 14147, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "2eb15465c5dd7f72a8f7984306ad05ebc3dd1e1f", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 1164802560, + "model_num_parameters": 582401280, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2048, + "max_ctx_length": 2048, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1025.8842857142856, - "min_seq_length": 1006, - "max_seq_length": 1274, - "max_ctx_length": 2048, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1258.8929384965832, - "min_seq_length": 1007, - "max_seq_length": 1666, - "max_ctx_length": 2048, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1119.2859477124182, + "min_seq_length": 1098, + "max_seq_length": 1188, + "max_ctx_length": 2048, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1203.2859477124182, + "min_seq_length": 1182, + "max_seq_length": 1272, + "max_ctx_length": 2048, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1350.0709318497914, + "min_seq_length": 1045, + "max_seq_length": 1953, + "max_ctx_length": 2048, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 3, + "non_truncated": 1426, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 4, + "mean_seq_length": 1387.624212736179, + "min_seq_length": 1144, + "max_seq_length": 2516, + "max_ctx_length": 2048, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972008397480754 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1401.1923076923076, + "min_seq_length": 1350, + "max_seq_length": 1499, + "max_ctx_length": 2048, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1025.8842857142856, + "min_seq_length": 1006, + "max_seq_length": 1274, + "max_ctx_length": 2048, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1258.8929384965832, + "min_seq_length": 1007, + "max_seq_length": 1666, + "max_ctx_length": 2048, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1516.3290246768508, + "min_seq_length": 1483, + "max_seq_length": 1542, + "max_ctx_length": 2048, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1376.9557213930348, + "min_seq_length": 1358, + "max_seq_length": 1430, + "max_ctx_length": 2048, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1516.3290246768508, - "min_seq_length": 1483, - "max_seq_length": 1542, - "max_ctx_length": 2048, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=google/mt5-base,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1376.9557213930348, - "min_seq_length": 1358, - "max_seq_length": 1430, - "max_ctx_length": 2048, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=google/mt5-base,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "0e4d6ae" + "git_hash": 
"0e4d6ae" } \ No newline at end of file diff --git a/google/mt5-base/raw_2024-04-19T18-04-31.312147/results.json b/google/mt5-base/raw_2024-04-19T18-04-31.312147/results.json index 05f28770397e4c96628ec117555702edcfb0dd58..fc3a87c9191fdb23fe58f8fb89a516e83c59a71b 100644 --- a/google/mt5-base/raw_2024-04-19T18-04-31.312147/results.json +++ b/google/mt5-base/raw_2024-04-19T18-04-31.312147/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.0, - "acc,all": 0.0, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.030671454944035954, - "mse,all": 7.189419934640523, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.10292072322670376, - "acc,exam_id__USP_2018": 0.2222222222222222, - "acc,exam_id__UNICAMP_2021_2": 0.0, - "acc,exam_id__UNICAMP_2019": 0.02, - "acc,exam_id__UNICAMP_2020": 0.0, - "acc,exam_id__UNICAMP_2024": 0.0, - "acc,exam_id__UNICAMP_2021_1": 0.0, - "acc,exam_id__USP_2019": 0.25, - "acc,exam_id__USP_2024": 0.21951219512195122, - "acc,exam_id__UNICAMP_2018": 0.0, - "acc,exam_id__UNICAMP_2023": 0.046511627906976744, - "acc,exam_id__USP_2021": 0.23076923076923078, - "acc,exam_id__UNICAMP_2022": 0.02564102564102564, - "acc,exam_id__USP_2022": 0.1836734693877551, - "acc,exam_id__USP_2023": 0.18181818181818182, - "acc,exam_id__USP_2020": 0.17857142857142858, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.19314205738278517, - "acc,exam_id__2009": 0.1391304347826087, - "acc,exam_id__2010": 0.20512820512820512, - "acc,exam_id__2022": 0.21804511278195488, - "acc,exam_id__2013": 0.18518518518518517, - "acc,exam_id__2014": 0.1651376146788991, - "acc,exam_id__2016": 0.19008264462809918, - "acc,exam_id__2016_2": 0.15447154471544716, - "acc,exam_id__2015": 0.12605042016806722, - "acc,exam_id__2023": 0.24444444444444444, - "acc,exam_id__2017": 0.1810344827586207, - "acc,exam_id__2012": 0.27586206896551724, - "acc,exam_id__2011": 0.2222222222222222 - }, - "faquad_nli": { - "f1_macro,all": 0.2416752488843117, - "acc,all": 0.5415384615384615, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.0028328611898017, - "acc,all": 0.002142857142857143 - }, - "oab_exams": { - "acc,all": 0.22915717539863326, - "acc,exam_id__2011-03": 0.20202020202020202, - "acc,exam_id__2015-18": 0.275, - "acc,exam_id__2012-08": 0.2625, - "acc,exam_id__2016-20": 0.2625, - "acc,exam_id__2014-14": 0.2375, - "acc,exam_id__2014-15": 0.24358974358974358, - "acc,exam_id__2013-10": 0.2625, - "acc,exam_id__2016-20a": 0.275, - "acc,exam_id__2018-25": 0.275, - "acc,exam_id__2010-01": 0.21176470588235294, - "acc,exam_id__2017-24": 0.2125, - "acc,exam_id__2012-09": 0.19480519480519481, - "acc,exam_id__2016-19": 0.19230769230769232, - "acc,exam_id__2012-06a": 0.3125, - "acc,exam_id__2017-23": 0.2125, - "acc,exam_id__2013-11": 0.1875, - "acc,exam_id__2012-06": 0.175, - "acc,exam_id__2015-17": 0.28205128205128205, - "acc,exam_id__2014-13": 0.225, - "acc,exam_id__2011-05": 0.25, - "acc,exam_id__2010-02": 0.21, - "acc,exam_id__2013-12": 0.1625, - "acc,exam_id__2012-07": 0.15, - "acc,exam_id__2015-16": 0.2, - "acc,exam_id__2017-22": 0.2625, - "acc,exam_id__2011-04": 0.2375, - "acc,exam_id__2016-21": 0.225, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.0, - "acc,all": 0.0 - }, - "tweetsentbr": { - "f1_macro,all": 0.0, - "acc,all": 0.0, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": 
"assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.0, + "acc,all": 0.0, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.030671454944035954, + "mse,all": 7.189419934640523, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.10292072322670376, + "acc,exam_id__USP_2018": 0.2222222222222222, + "acc,exam_id__UNICAMP_2021_2": 0.0, + "acc,exam_id__UNICAMP_2019": 0.02, + "acc,exam_id__UNICAMP_2020": 0.0, + "acc,exam_id__UNICAMP_2024": 0.0, + "acc,exam_id__UNICAMP_2021_1": 0.0, + "acc,exam_id__USP_2019": 0.25, + "acc,exam_id__USP_2024": 0.21951219512195122, + "acc,exam_id__UNICAMP_2018": 0.0, + "acc,exam_id__UNICAMP_2023": 0.046511627906976744, + "acc,exam_id__USP_2021": 0.23076923076923078, + "acc,exam_id__UNICAMP_2022": 0.02564102564102564, + "acc,exam_id__USP_2022": 0.1836734693877551, + "acc,exam_id__USP_2023": 0.18181818181818182, + "acc,exam_id__USP_2020": 0.17857142857142858, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.19314205738278517, + "acc,exam_id__2009": 0.1391304347826087, + "acc,exam_id__2010": 0.20512820512820512, + "acc,exam_id__2022": 0.21804511278195488, + "acc,exam_id__2013": 0.18518518518518517, + "acc,exam_id__2014": 0.1651376146788991, + "acc,exam_id__2016": 0.19008264462809918, + "acc,exam_id__2016_2": 0.15447154471544716, + "acc,exam_id__2015": 0.12605042016806722, + "acc,exam_id__2023": 0.24444444444444444, + "acc,exam_id__2017": 0.1810344827586207, + "acc,exam_id__2012": 0.27586206896551724, + "acc,exam_id__2011": 0.2222222222222222 + }, + "faquad_nli": { + "f1_macro,all": 0.3625128733264676, + "acc,all": 0.5415384615384615, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.004249291784702549, + "acc,all": 0.002142857142857143 + }, + "oab_exams": { + "acc,all": 0.22915717539863326, + "acc,exam_id__2011-03": 0.20202020202020202, + "acc,exam_id__2015-18": 0.275, + "acc,exam_id__2012-08": 0.2625, + "acc,exam_id__2016-20": 0.2625, + "acc,exam_id__2014-14": 0.2375, + "acc,exam_id__2014-15": 0.24358974358974358, + "acc,exam_id__2013-10": 0.2625, + "acc,exam_id__2016-20a": 0.275, + "acc,exam_id__2018-25": 0.275, + "acc,exam_id__2010-01": 0.21176470588235294, + "acc,exam_id__2017-24": 0.2125, + "acc,exam_id__2012-09": 
0.19480519480519481, + "acc,exam_id__2016-19": 0.19230769230769232, + "acc,exam_id__2012-06a": 0.3125, + "acc,exam_id__2017-23": 0.2125, + "acc,exam_id__2013-11": 0.1875, + "acc,exam_id__2012-06": 0.175, + "acc,exam_id__2015-17": 0.28205128205128205, + "acc,exam_id__2014-13": 0.225, + "acc,exam_id__2011-05": 0.25, + "acc,exam_id__2010-02": 0.21, + "acc,exam_id__2013-12": 0.1625, + "acc,exam_id__2012-07": 0.15, + "acc,exam_id__2015-16": 0.2, + "acc,exam_id__2017-22": 0.2625, + "acc,exam_id__2011-04": 0.2375, + "acc,exam_id__2016-21": 0.225, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.0, + "acc,all": 0.0 + }, + "tweetsentbr": { + "f1_macro,all": 0.0, + "acc,all": 0.0, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 3, - "non_truncated": 14147, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "2eb15465c5dd7f72a8f7984306ad05ebc3dd1e1f", - "model_dtype": "torch.float16", - "model_memory_footprint": 1240300032, - "model_num_parameters": 582401280, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 4, - "max_length": 2048, - "max_ctx_length": 2048, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1119.2859477124182, - "min_seq_length": 1098, - "max_seq_length": 1188, - "max_ctx_length": 2048, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1203.2859477124182, - "min_seq_length": 1182, - "max_seq_length": 1272, - "max_ctx_length": 2048, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1350.0709318497914, - "min_seq_length": 1045, - "max_seq_length": 1953, - "max_ctx_length": 2048, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 3, - "non_truncated": 1426, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 4, - "mean_seq_length": 1387.624212736179, - 
"min_seq_length": 1144, - "max_seq_length": 2516, - "max_ctx_length": 2048, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972008397480754 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1401.1923076923076, - "min_seq_length": 1350, - "max_seq_length": 1499, - "max_ctx_length": 2048, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 3, + "non_truncated": 14147, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "2eb15465c5dd7f72a8f7984306ad05ebc3dd1e1f", + "model_dtype": "torch.float16", + "model_memory_footprint": 1240300032, + "model_num_parameters": 582401280, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 4, + "max_length": 2048, + "max_ctx_length": 2048, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1025.8842857142856, - "min_seq_length": 1006, - "max_seq_length": 1274, - "max_ctx_length": 2048, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1258.8929384965832, - "min_seq_length": 1007, - "max_seq_length": 1666, - "max_ctx_length": 2048, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1119.2859477124182, + "min_seq_length": 1098, + "max_seq_length": 1188, + "max_ctx_length": 2048, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1203.2859477124182, + "min_seq_length": 1182, + "max_seq_length": 1272, + "max_ctx_length": 2048, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1350.0709318497914, + "min_seq_length": 1045, + "max_seq_length": 1953, + "max_ctx_length": 2048, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 3, + "non_truncated": 1426, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 4, + "mean_seq_length": 1387.624212736179, + "min_seq_length": 1144, + "max_seq_length": 2516, + "max_ctx_length": 2048, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972008397480754 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1401.1923076923076, + "min_seq_length": 1350, + "max_seq_length": 1499, + "max_ctx_length": 2048, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1025.8842857142856, + "min_seq_length": 1006, + "max_seq_length": 1274, + "max_ctx_length": 2048, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1258.8929384965832, + "min_seq_length": 1007, + "max_seq_length": 1666, + "max_ctx_length": 2048, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1516.3290246768508, + "min_seq_length": 1483, + "max_seq_length": 1542, + "max_ctx_length": 2048, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1376.9557213930348, + "min_seq_length": 1358, + "max_seq_length": 1430, + "max_ctx_length": 2048, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1516.3290246768508, - "min_seq_length": 1483, - "max_seq_length": 1542, - "max_ctx_length": 2048, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=google/mt5-base,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1376.9557213930348, - "min_seq_length": 1358, - "max_seq_length": 1430, - "max_ctx_length": 2048, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=google/mt5-base,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "0e4d6ae" + "git_hash": "0e4d6ae" 
} \ No newline at end of file diff --git a/google/mt5-base/results_2024-04-18T06-10-32.403150.json b/google/mt5-base/results_2024-04-18T06-10-32.403150.json index 2add1d85a6f15b11a03087e296db257311051859..9af72dcd5057abd6a847bddf85c8a50e772e1007 100644 --- a/google/mt5-base/results_2024-04-18T06-10-32.403150.json +++ b/google/mt5-base/results_2024-04-18T06-10-32.403150.json @@ -34,16 +34,16 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.0887427378848531, - "all_grouped_npm": -0.4412388423549494, + "all_grouped_average": 0.10163407368631462, + "all_grouped_npm": -0.41749451024241196, "all_grouped": { "enem_challenge": 0.18964310706787962, "bluex": 0.10152990264255911, "oab_exams": 0.23189066059225513, - "assin2_rte": 0.0005442176870748299, + "assin2_rte": 0.0008163265306122449, "assin2_sts": 0.0435769262346769, - "faquad_nli": 0.22681159420289854, - "hatebr_offensive": 0.004688232536333802, + "faquad_nli": 0.34021739130434786, + "hatebr_offensive": 0.007032348804500704, "portuguese_hate_speech": 0.0, "tweetsentbr": 0.0 }, @@ -51,10 +51,10 @@ "harness|enem_challenge|enem_challenge|None|3": 0.18964310706787962, "harness|bluex|bluex|None|3": 0.10152990264255911, "harness|oab_exams|oab_exams|None|3": 0.23189066059225513, - "harness|assin2_rte|assin2_rte|None|15": 0.0005442176870748299, + "harness|assin2_rte|assin2_rte|None|15": 0.0008163265306122449, "harness|assin2_sts|assin2_sts|None|15": 0.0435769262346769, - "harness|faquad_nli|faquad_nli|None|15": 0.22681159420289854, - "harness|hatebr_offensive|hatebr_offensive|None|25": 0.004688232536333802, + "harness|faquad_nli|faquad_nli|None|15": 0.34021739130434786, + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.007032348804500704, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.0, "harness|tweetsentbr|tweetsentbr|None|25": 0.0 }, @@ -125,9 +125,9 @@ "main_score": 0.23189066059225513 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.0005442176870748299, + "f1_macro,all": 0.0008163265306122449, "acc,all": 0.0004084967320261438, - "main_score": 0.0005442176870748299 + "main_score": 0.0008163265306122449 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.0435769262346769, @@ -135,14 +135,14 @@ "main_score": 0.0435769262346769 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.22681159420289854, + "f1_macro,all": 0.34021739130434786, "acc,all": 0.4815384615384615, - "main_score": 0.22681159420289854 + "main_score": 0.34021739130434786 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.004688232536333802, + "f1_macro,all": 0.007032348804500704, "acc,all": 0.0035714285714285713, - "main_score": 0.004688232536333802 + "main_score": 0.007032348804500704 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { "f1_macro,all": 0.0, diff --git a/google/mt5-base/results_2024-04-19T18-04-31.312147.json b/google/mt5-base/results_2024-04-19T18-04-31.312147.json index cfcd8090e982e9c8b92bfd863dcc19b9470a1287..d31de42653b621bddb8fd02c3a1398616f4103a2 100644 --- a/google/mt5-base/results_2024-04-19T18-04-31.312147.json +++ b/google/mt5-base/results_2024-04-19T18-04-31.312147.json @@ -34,16 +34,16 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.08893328011403018, - "all_grouped_npm": -0.4398897430825887, + "all_grouped_average": 0.10251706400703647, + "all_grouped_npm": -0.41489409338295474, "all_grouped": { "enem_challenge": 0.19314205738278517, "bluex": 0.10292072322670376, "oab_exams": 0.22915717539863326, 
"assin2_rte": 0.0, "assin2_sts": 0.030671454944035954, - "faquad_nli": 0.2416752488843117, - "hatebr_offensive": 0.0028328611898017, + "faquad_nli": 0.3625128733264676, + "hatebr_offensive": 0.004249291784702549, "portuguese_hate_speech": 0.0, "tweetsentbr": 0.0 }, @@ -53,8 +53,8 @@ "harness|oab_exams|oab_exams|None|3": 0.22915717539863326, "harness|assin2_rte|assin2_rte|None|15": 0.0, "harness|assin2_sts|assin2_sts|None|15": 0.030671454944035954, - "harness|faquad_nli|faquad_nli|None|15": 0.2416752488843117, - "harness|hatebr_offensive|hatebr_offensive|None|25": 0.0028328611898017, + "harness|faquad_nli|faquad_nli|None|15": 0.3625128733264676, + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.004249291784702549, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.0, "harness|tweetsentbr|tweetsentbr|None|25": 0.0 }, @@ -135,14 +135,14 @@ "main_score": 0.030671454944035954 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.2416752488843117, + "f1_macro,all": 0.3625128733264676, "acc,all": 0.5415384615384615, - "main_score": 0.2416752488843117 + "main_score": 0.3625128733264676 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.0028328611898017, + "f1_macro,all": 0.004249291784702549, "acc,all": 0.002142857142857143, - "main_score": 0.0028328611898017 + "main_score": 0.004249291784702549 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { "f1_macro,all": 0.0, diff --git a/google/mt5-small/raw_2024-04-18T05-31-05.645206/results.json b/google/mt5-small/raw_2024-04-18T05-31-05.645206/results.json index a66cc586789cc2b73dd961a68f56b72276d043ca..545c1468fa12978843faef51b47916cb0b7885c7 100644 --- a/google/mt5-small/raw_2024-04-18T05-31-05.645206/results.json +++ b/google/mt5-small/raw_2024-04-18T05-31-05.645206/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.0005442176870748299, - "acc,all": 0.0004084967320261438, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.07222006970775031, - "mse,all": 7.337785947712419, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.0, - "acc,exam_id__UNICAMP_2023": 0.0, - "acc,exam_id__UNICAMP_2020": 0.0, - "acc,exam_id__USP_2024": 0.0, - "acc,exam_id__UNICAMP_2021_1": 0.0, - "acc,exam_id__UNICAMP_2024": 0.0, - "acc,exam_id__UNICAMP_2018": 0.0, - "acc,exam_id__USP_2019": 0.0, - "acc,exam_id__UNICAMP_2019": 0.0, - "acc,exam_id__UNICAMP_2021_2": 0.0, - "acc,exam_id__USP_2023": 0.0, - "acc,exam_id__USP_2022": 0.0, - "acc,exam_id__USP_2018": 0.0, - "acc,exam_id__USP_2020": 0.0, - "acc,exam_id__USP_2021": 0.0, - "acc,exam_id__UNICAMP_2022": 0.0, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.0, - "acc,exam_id__2022": 0.0, - "acc,exam_id__2016": 0.0, - "acc,exam_id__2013": 0.0, - "acc,exam_id__2016_2": 0.0, - "acc,exam_id__2009": 0.0, - "acc,exam_id__2023": 0.0, - "acc,exam_id__2010": 0.0, - "acc,exam_id__2012": 0.0, - "acc,exam_id__2017": 0.0, - "acc,exam_id__2014": 0.0, - "acc,exam_id__2015": 0.0, - "acc,exam_id__2011": 0.0 - }, - "faquad_nli": { - "f1_macro,all": 0.0, - "acc,all": 0.0, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.0, - "acc,all": 0.0 - }, - "oab_exams": { - "acc,all": 0.0, - "acc,exam_id__2012-08": 0.0, - "acc,exam_id__2016-20": 0.0, - "acc,exam_id__2014-14": 0.0, - "acc,exam_id__2011-05": 0.0, - "acc,exam_id__2011-03": 0.0, - "acc,exam_id__2012-06a": 0.0, - "acc,exam_id__2017-22": 0.0, - "acc,exam_id__2011-04": 
0.0, - "acc,exam_id__2010-01": 0.0, - "acc,exam_id__2012-09": 0.0, - "acc,exam_id__2014-13": 0.0, - "acc,exam_id__2015-17": 0.0, - "acc,exam_id__2015-16": 0.0, - "acc,exam_id__2017-24": 0.0, - "acc,exam_id__2016-19": 0.0, - "acc,exam_id__2010-02": 0.0, - "acc,exam_id__2014-15": 0.0, - "acc,exam_id__2016-20a": 0.0, - "acc,exam_id__2013-11": 0.0, - "acc,exam_id__2015-18": 0.0, - "acc,exam_id__2016-21": 0.0, - "acc,exam_id__2017-23": 0.0, - "acc,exam_id__2012-07": 0.0, - "acc,exam_id__2018-25": 0.0, - "acc,exam_id__2013-12": 0.0, - "acc,exam_id__2012-06": 0.0, - "acc,exam_id__2013-10": 0.0, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.0, - "acc,all": 0.0 - }, - "tweetsentbr": { - "f1_macro,all": 0.0, - "acc,all": 0.0, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.0008163265306122449, + "acc,all": 0.0004084967320261438, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.07222006970775031, + "mse,all": 7.337785947712419, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.0, + "acc,exam_id__UNICAMP_2023": 0.0, + "acc,exam_id__UNICAMP_2020": 0.0, + "acc,exam_id__USP_2024": 0.0, + "acc,exam_id__UNICAMP_2021_1": 0.0, + "acc,exam_id__UNICAMP_2024": 0.0, + "acc,exam_id__UNICAMP_2018": 0.0, + "acc,exam_id__USP_2019": 0.0, + "acc,exam_id__UNICAMP_2019": 0.0, + "acc,exam_id__UNICAMP_2021_2": 0.0, + "acc,exam_id__USP_2023": 0.0, + "acc,exam_id__USP_2022": 0.0, + "acc,exam_id__USP_2018": 0.0, + "acc,exam_id__USP_2020": 0.0, + "acc,exam_id__USP_2021": 0.0, + "acc,exam_id__UNICAMP_2022": 0.0, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.0, + "acc,exam_id__2022": 0.0, + "acc,exam_id__2016": 0.0, + "acc,exam_id__2013": 0.0, + "acc,exam_id__2016_2": 0.0, + "acc,exam_id__2009": 0.0, + "acc,exam_id__2023": 0.0, + "acc,exam_id__2010": 0.0, + "acc,exam_id__2012": 0.0, + "acc,exam_id__2017": 0.0, + "acc,exam_id__2014": 0.0, + "acc,exam_id__2015": 0.0, + "acc,exam_id__2011": 0.0 + }, + "faquad_nli": { + "f1_macro,all": 0.0, + "acc,all": 0.0, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.0, + "acc,all": 
0.0 + }, + "oab_exams": { + "acc,all": 0.0, + "acc,exam_id__2012-08": 0.0, + "acc,exam_id__2016-20": 0.0, + "acc,exam_id__2014-14": 0.0, + "acc,exam_id__2011-05": 0.0, + "acc,exam_id__2011-03": 0.0, + "acc,exam_id__2012-06a": 0.0, + "acc,exam_id__2017-22": 0.0, + "acc,exam_id__2011-04": 0.0, + "acc,exam_id__2010-01": 0.0, + "acc,exam_id__2012-09": 0.0, + "acc,exam_id__2014-13": 0.0, + "acc,exam_id__2015-17": 0.0, + "acc,exam_id__2015-16": 0.0, + "acc,exam_id__2017-24": 0.0, + "acc,exam_id__2016-19": 0.0, + "acc,exam_id__2010-02": 0.0, + "acc,exam_id__2014-15": 0.0, + "acc,exam_id__2016-20a": 0.0, + "acc,exam_id__2013-11": 0.0, + "acc,exam_id__2015-18": 0.0, + "acc,exam_id__2016-21": 0.0, + "acc,exam_id__2017-23": 0.0, + "acc,exam_id__2012-07": 0.0, + "acc,exam_id__2018-25": 0.0, + "acc,exam_id__2013-12": 0.0, + "acc,exam_id__2012-06": 0.0, + "acc,exam_id__2013-10": 0.0, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.0, + "acc,all": 0.0 + }, + "tweetsentbr": { + "f1_macro,all": 0.0, + "acc,all": 0.0, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 3, - "non_truncated": 14147, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "73fb5dbe4756edadc8fbe8c769b0a109493acf7a", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 600353536, - "model_num_parameters": 300176768, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2048, - "max_ctx_length": 2048, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1119.2859477124182, - "min_seq_length": 1098, - "max_seq_length": 1188, - "max_ctx_length": 2048, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1203.2859477124182, - "min_seq_length": 1182, - "max_seq_length": 1272, - "max_ctx_length": 2048, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1350.0709318497914, - "min_seq_length": 1045, - "max_seq_length": 1953, - "max_ctx_length": 2048, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 3, - "non_truncated": 1426, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 4, - "mean_seq_length": 1387.624212736179, - 
"min_seq_length": 1144, - "max_seq_length": 2516, - "max_ctx_length": 2048, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972008397480754 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1401.1923076923076, - "min_seq_length": 1350, - "max_seq_length": 1499, - "max_ctx_length": 2048, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 3, + "non_truncated": 14147, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "73fb5dbe4756edadc8fbe8c769b0a109493acf7a", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 600353536, + "model_num_parameters": 300176768, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2048, + "max_ctx_length": 2048, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1025.8842857142856, - "min_seq_length": 1006, - "max_seq_length": 1274, - "max_ctx_length": 2048, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1258.8929384965832, - "min_seq_length": 1007, - "max_seq_length": 1666, - "max_ctx_length": 2048, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1119.2859477124182, + "min_seq_length": 1098, + "max_seq_length": 1188, + "max_ctx_length": 2048, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1203.2859477124182, + "min_seq_length": 1182, + "max_seq_length": 1272, + "max_ctx_length": 2048, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1350.0709318497914, + "min_seq_length": 1045, + "max_seq_length": 1953, + "max_ctx_length": 2048, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 3, + "non_truncated": 1426, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 4, + "mean_seq_length": 1387.624212736179, + "min_seq_length": 1144, + "max_seq_length": 2516, + "max_ctx_length": 2048, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972008397480754 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1401.1923076923076, + "min_seq_length": 1350, + "max_seq_length": 1499, + "max_ctx_length": 2048, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1025.8842857142856, + "min_seq_length": 1006, + "max_seq_length": 1274, + "max_ctx_length": 2048, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1258.8929384965832, + "min_seq_length": 1007, + "max_seq_length": 1666, + "max_ctx_length": 2048, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1516.3290246768508, + "min_seq_length": 1483, + "max_seq_length": 1542, + "max_ctx_length": 2048, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1376.9557213930348, + "min_seq_length": 1358, + "max_seq_length": 1430, + "max_ctx_length": 2048, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1516.3290246768508, - "min_seq_length": 1483, - "max_seq_length": 1542, - "max_ctx_length": 2048, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=google/mt5-small,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1376.9557213930348, - "min_seq_length": 1358, - "max_seq_length": 1430, - "max_ctx_length": 2048, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=google/mt5-small,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "0e4d6ae" + "git_hash": 
"0e4d6ae" } \ No newline at end of file diff --git a/google/mt5-small/results_2024-04-18T05-31-05.645206.json b/google/mt5-small/results_2024-04-18T05-31-05.645206.json index 4f03d8c2fd48337f72bc5857d1f54073a1dfb4aa..68e2637462823324543f3afcf233ca933f41a49f 100644 --- a/google/mt5-small/results_2024-04-18T05-31-05.645206.json +++ b/google/mt5-small/results_2024-04-18T05-31-05.645206.json @@ -34,13 +34,13 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.008084920821647239, - "all_grouped_npm": -0.5606737486282873, + "all_grouped_average": 0.00811515513759584, + "all_grouped_npm": -0.5606132799963901, "all_grouped": { "enem_challenge": 0.0, "bluex": 0.0, "oab_exams": 0.0, - "assin2_rte": 0.0005442176870748299, + "assin2_rte": 0.0008163265306122449, "assin2_sts": 0.07222006970775031, "faquad_nli": 0.0, "hatebr_offensive": 0.0, @@ -51,7 +51,7 @@ "harness|enem_challenge|enem_challenge|None|3": 0.0, "harness|bluex|bluex|None|3": 0.0, "harness|oab_exams|oab_exams|None|3": 0.0, - "harness|assin2_rte|assin2_rte|None|15": 0.0005442176870748299, + "harness|assin2_rte|assin2_rte|None|15": 0.0008163265306122449, "harness|assin2_sts|assin2_sts|None|15": 0.07222006970775031, "harness|faquad_nli|faquad_nli|None|15": 0.0, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.0, @@ -125,9 +125,9 @@ "main_score": 0.0 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.0005442176870748299, + "f1_macro,all": 0.0008163265306122449, "acc,all": 0.0004084967320261438, - "main_score": 0.0005442176870748299 + "main_score": 0.0008163265306122449 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.07222006970775031, diff --git a/grimjim/Llama-3-Instruct-8B-SPPO-Iter3-SimPO-merge/raw_2024-08-11T16-25-38.137060/results.json b/grimjim/Llama-3-Instruct-8B-SPPO-Iter3-SimPO-merge/raw_2024-08-11T16-25-38.137060/results.json index abfce719dd9f898e6033094a1491667c4783727d..442eb618b0d679d6a92093619d01f756478e87f5 100644 --- a/grimjim/Llama-3-Instruct-8B-SPPO-Iter3-SimPO-merge/raw_2024-08-11T16-25-38.137060/results.json +++ b/grimjim/Llama-3-Instruct-8B-SPPO-Iter3-SimPO-merge/raw_2024-08-11T16-25-38.137060/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9129288088010328, - "acc,all": 0.9129901960784313, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7129374319103543, - "mse,all": 1.2205234750816993, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5730180806675939, - "acc,exam_id__UNICAMP_2018": 0.48148148148148145, - "acc,exam_id__USP_2022": 0.6326530612244898, - "acc,exam_id__UNICAMP_2019": 0.62, - "acc,exam_id__UNICAMP_2020": 0.5454545454545454, - "acc,exam_id__USP_2024": 0.6341463414634146, - "acc,exam_id__UNICAMP_2024": 0.6222222222222222, - "acc,exam_id__USP_2021": 0.5384615384615384, - "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, - "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, - "acc,exam_id__UNICAMP_2022": 0.6666666666666666, - "acc,exam_id__USP_2023": 0.6818181818181818, - "acc,exam_id__UNICAMP_2023": 0.6744186046511628, - "acc,exam_id__USP_2018": 0.5, - "acc,exam_id__USP_2020": 0.5357142857142857, - "acc,exam_id__USP_2019": 0.5, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6913925822253324, - "acc,exam_id__2011": 0.7435897435897436, - "acc,exam_id__2010": 0.717948717948718, - "acc,exam_id__2009": 0.7043478260869566, - "acc,exam_id__2015": 0.6974789915966386, - "acc,exam_id__2017": 0.6724137931034483, - "acc,exam_id__2014": 0.6697247706422018, - 
"acc,exam_id__2022": 0.6691729323308271, - "acc,exam_id__2012": 0.6896551724137931, - "acc,exam_id__2023": 0.7481481481481481, - "acc,exam_id__2016_2": 0.6422764227642277, - "acc,exam_id__2016": 0.6611570247933884, - "acc,exam_id__2013": 0.6759259259259259 - }, - "faquad_nli": { - "f1_macro,all": 0.7097079326156245, - "acc,all": 0.7523076923076923, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8569250386831352, - "acc,all": 0.8578571428571429 - }, - "oab_exams": { - "acc,all": 0.4947608200455581, - "acc,exam_id__2017-24": 0.4625, - "acc,exam_id__2013-12": 0.525, - "acc,exam_id__2015-18": 0.4875, - "acc,exam_id__2018-25": 0.475, - "acc,exam_id__2016-19": 0.5641025641025641, - "acc,exam_id__2010-01": 0.3411764705882353, - "acc,exam_id__2016-20": 0.5125, - "acc,exam_id__2016-21": 0.3875, - "acc,exam_id__2011-04": 0.5, - "acc,exam_id__2014-15": 0.5512820512820513, - "acc,exam_id__2010-02": 0.51, - "acc,exam_id__2013-10": 0.4625, - "acc,exam_id__2012-07": 0.4875, - "acc,exam_id__2015-17": 0.6282051282051282, - "acc,exam_id__2013-11": 0.5, - "acc,exam_id__2012-09": 0.4935064935064935, - "acc,exam_id__2015-16": 0.5, - "acc,exam_id__2012-08": 0.525, - "acc,exam_id__2011-03": 0.46464646464646464, - "acc,exam_id__2016-20a": 0.375, - "acc,exam_id__2014-13": 0.4125, - "acc,exam_id__2017-23": 0.475, - "acc,exam_id__2012-06": 0.525, - "acc,exam_id__2011-05": 0.45, - "acc,exam_id__2014-14": 0.5875, - "acc,exam_id__2012-06a": 0.5875, - "acc,exam_id__2017-22": 0.5875, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6963111124817714, - "acc,all": 0.7144535840188014 - }, - "tweetsentbr": { - "f1_macro,all": 0.475676000471147, - "acc,all": 0.7089552238805971, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9129288088010328, + "acc,all": 0.9129901960784313, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7129374319103543, + "mse,all": 1.2205234750816993, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5730180806675939, + "acc,exam_id__UNICAMP_2018": 0.48148148148148145, + "acc,exam_id__USP_2022": 0.6326530612244898, + "acc,exam_id__UNICAMP_2019": 0.62, + "acc,exam_id__UNICAMP_2020": 0.5454545454545454, + "acc,exam_id__USP_2024": 0.6341463414634146, + "acc,exam_id__UNICAMP_2024": 0.6222222222222222, + "acc,exam_id__USP_2021": 0.5384615384615384, + "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, + "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, + "acc,exam_id__UNICAMP_2022": 0.6666666666666666, + "acc,exam_id__USP_2023": 0.6818181818181818, + "acc,exam_id__UNICAMP_2023": 0.6744186046511628, + "acc,exam_id__USP_2018": 0.5, + "acc,exam_id__USP_2020": 0.5357142857142857, + "acc,exam_id__USP_2019": 0.5, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6913925822253324, + "acc,exam_id__2011": 0.7435897435897436, + "acc,exam_id__2010": 0.717948717948718, + "acc,exam_id__2009": 0.7043478260869566, + "acc,exam_id__2015": 0.6974789915966386, + "acc,exam_id__2017": 0.6724137931034483, + "acc,exam_id__2014": 0.6697247706422018, + "acc,exam_id__2022": 0.6691729323308271, + "acc,exam_id__2012": 0.6896551724137931, + "acc,exam_id__2023": 0.7481481481481481, + "acc,exam_id__2016_2": 0.6422764227642277, + "acc,exam_id__2016": 0.6611570247933884, + "acc,exam_id__2013": 0.6759259259259259 + }, + "faquad_nli": { + "f1_macro,all": 0.7097079326156245, + "acc,all": 0.7523076923076923, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8569250386831352, + "acc,all": 0.8578571428571429 + }, + "oab_exams": { + "acc,all": 0.4947608200455581, + "acc,exam_id__2017-24": 0.4625, + "acc,exam_id__2013-12": 0.525, + "acc,exam_id__2015-18": 0.4875, + "acc,exam_id__2018-25": 0.475, + "acc,exam_id__2016-19": 0.5641025641025641, + "acc,exam_id__2010-01": 0.3411764705882353, + "acc,exam_id__2016-20": 0.5125, + "acc,exam_id__2016-21": 0.3875, + "acc,exam_id__2011-04": 0.5, + "acc,exam_id__2014-15": 0.5512820512820513, + "acc,exam_id__2010-02": 0.51, + "acc,exam_id__2013-10": 0.4625, + "acc,exam_id__2012-07": 0.4875, + "acc,exam_id__2015-17": 0.6282051282051282, + "acc,exam_id__2013-11": 0.5, + "acc,exam_id__2012-09": 0.4935064935064935, + "acc,exam_id__2015-16": 0.5, + "acc,exam_id__2012-08": 0.525, + "acc,exam_id__2011-03": 0.46464646464646464, + "acc,exam_id__2016-20a": 0.375, + "acc,exam_id__2014-13": 0.4125, + 
"acc,exam_id__2017-23": 0.475, + "acc,exam_id__2012-06": 0.525, + "acc,exam_id__2011-05": 0.45, + "acc,exam_id__2014-14": 0.5875, + "acc,exam_id__2012-06a": 0.5875, + "acc,exam_id__2017-22": 0.5875, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6963111124817714, + "acc,all": 0.7144535840188014 + }, + "tweetsentbr": { + "f1_macro,all": 0.6342346672948627, + "acc,all": 0.7089552238805971, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "7a8d334dce0a2ce948f75612b8d3a61c53d094aa", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 16060530944, - "model_num_parameters": 8030261248, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1318.5322712418301, - "min_seq_length": 1299, - "max_seq_length": 1382, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1509.5322712418301, - "min_seq_length": 1490, - "max_seq_length": 1573, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1484.7719054242002, - "min_seq_length": 1165, - "max_seq_length": 2134, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1412.3547935619315, - "min_seq_length": 1187, - "max_seq_length": 2340, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1447.8215384615385, - "min_seq_length": 1402, - "max_seq_length": 1544, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "7a8d334dce0a2ce948f75612b8d3a61c53d094aa", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 16060530944, + "model_num_parameters": 8030261248, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1279.3878571428572, - "min_seq_length": 1259, - "max_seq_length": 1498, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1220.3772209567198, - "min_seq_length": 988, - "max_seq_length": 1654, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1318.5322712418301, + "min_seq_length": 1299, + "max_seq_length": 1382, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1509.5322712418301, + "min_seq_length": 1490, + "max_seq_length": 1573, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1484.7719054242002, + "min_seq_length": 1165, + "max_seq_length": 2134, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1412.3547935619315, + "min_seq_length": 1187, + "max_seq_length": 2340, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1447.8215384615385, + "min_seq_length": 1402, + "max_seq_length": 1544, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1279.3878571428572, + "min_seq_length": 1259, + "max_seq_length": 1498, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1220.3772209567198, + "min_seq_length": 988, + "max_seq_length": 1654, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1676.4195064629848, + "min_seq_length": 1646, + "max_seq_length": 1708, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1537.1537313432837, + "min_seq_length": 1520, + "max_seq_length": 1585, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1676.4195064629848, - "min_seq_length": 1646, - "max_seq_length": 1708, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=grimjim/Llama-3-Instruct-8B-SPPO-Iter3-SimPO-merge,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1537.1537313432837, - "min_seq_length": 1520, - "max_seq_length": 1585, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=grimjim/Llama-3-Instruct-8B-SPPO-Iter3-SimPO-merge,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - 
"bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/grimjim/Llama-3-Instruct-8B-SPPO-Iter3-SimPO-merge/results_2024-08-11T16-25-38.137060.json b/grimjim/Llama-3-Instruct-8B-SPPO-Iter3-SimPO-merge/results_2024-08-11T16-25-38.137060.json index 209795c79634e9594e7fef7b55202cce9491e215..870225c9db12056c71a3cc5b816cb347033e55d1 100644 --- a/grimjim/Llama-3-Instruct-8B-SPPO-Iter3-SimPO-merge/results_2024-08-11T16-25-38.137060.json +++ b/grimjim/Llama-3-Instruct-8B-SPPO-Iter3-SimPO-merge/results_2024-08-11T16-25-38.137060.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6804064231001721, - "all_grouped_npm": 0.5272804327357711, + "all_grouped_average": 0.6980240527472517, + "all_grouped_npm": 0.5534971435201156, "all_grouped": { "enem_challenge": 0.6913925822253324, "bluex": 0.5730180806675939, @@ -45,7 +45,7 @@ "faquad_nli": 0.7097079326156245, "hatebr_offensive": 0.8569250386831352, "portuguese_hate_speech": 0.6963111124817714, - "tweetsentbr": 0.475676000471147 + "tweetsentbr": 0.6342346672948627 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6913925822253324, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7097079326156245, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8569250386831352, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6963111124817714, - "harness|tweetsentbr|tweetsentbr|None|25": 0.475676000471147 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6342346672948627 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6913925822253324, @@ -150,9 +150,9 @@ "main_score": 0.6963111124817714 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.475676000471147, + "f1_macro,all": 0.6342346672948627, "acc,all": 0.7089552238805971, - "main_score": 0.475676000471147 + "main_score": 0.6342346672948627 } }, "config_tasks": { diff --git a/hkust-nlp/deita-7b-v1.0/raw_2024-05-20T23-09-35.076966/results.json b/hkust-nlp/deita-7b-v1.0/raw_2024-05-20T23-09-35.076966/results.json index dc26fb5dc5a00ad10e0546a0fa1a56d471e3d744..7b38690dc9bc9e313cd57c8d613f21d3f6c1f58a 100644 --- a/hkust-nlp/deita-7b-v1.0/raw_2024-05-20T23-09-35.076966/results.json +++ b/hkust-nlp/deita-7b-v1.0/raw_2024-05-20T23-09-35.076966/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.8520896848392872, - "acc,all": 0.8537581699346405, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7580664670996959, - "mse,all": 0.5798937908496731, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.45897079276773295, - "acc,exam_id__USP_2023": 0.5681818181818182, - "acc,exam_id__USP_2020": 0.375, - "acc,exam_id__UNICAMP_2023": 0.4418604651162791, - "acc,exam_id__USP_2018": 0.4074074074074074, - "acc,exam_id__UNICAMP_2022": 0.5897435897435898, - "acc,exam_id__USP_2024": 0.6585365853658537, - "acc,exam_id__UNICAMP_2024": 0.4666666666666667, - "acc,exam_id__USP_2019": 0.4, - "acc,exam_id__USP_2022": 0.4489795918367347, - "acc,exam_id__UNICAMP_2018": 0.35185185185185186, - "acc,exam_id__USP_2021": 0.36538461538461536, - "acc,exam_id__UNICAMP_2021_2": 0.43137254901960786, - "acc,exam_id__UNICAMP_2021_1": 0.41304347826086957, - "acc,exam_id__UNICAMP_2020": 0.4727272727272727, - "acc,exam_id__UNICAMP_2019": 0.58, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.5990202939118264, - "acc,exam_id__2022": 0.6466165413533834, - "acc,exam_id__2012": 
0.5603448275862069, - "acc,exam_id__2010": 0.5897435897435898, - "acc,exam_id__2013": 0.5925925925925926, - "acc,exam_id__2009": 0.6173913043478261, - "acc,exam_id__2023": 0.5925925925925926, - "acc,exam_id__2017": 0.5172413793103449, - "acc,exam_id__2016": 0.5537190082644629, - "acc,exam_id__2015": 0.5546218487394958, - "acc,exam_id__2014": 0.6513761467889908, - "acc,exam_id__2016_2": 0.6097560975609756, - "acc,exam_id__2011": 0.7008547008547008 - }, - "faquad_nli": { - "f1_macro,all": 0.7632014424026872, - "acc,all": 0.8184615384615385, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8462835603587262, - "acc,all": 0.8464285714285714 - }, - "oab_exams": { - "acc,all": 0.4, - "acc,exam_id__2012-07": 0.3875, - "acc,exam_id__2013-10": 0.375, - "acc,exam_id__2016-20a": 0.3625, - "acc,exam_id__2017-24": 0.4125, - "acc,exam_id__2011-05": 0.4625, - "acc,exam_id__2014-13": 0.3625, - "acc,exam_id__2017-22": 0.4875, - "acc,exam_id__2012-06": 0.325, - "acc,exam_id__2017-23": 0.4, - "acc,exam_id__2016-19": 0.44871794871794873, - "acc,exam_id__2012-06a": 0.4125, - "acc,exam_id__2014-14": 0.3875, - "acc,exam_id__2012-08": 0.375, - "acc,exam_id__2010-01": 0.35294117647058826, - "acc,exam_id__2015-16": 0.375, - "acc,exam_id__2012-09": 0.35064935064935066, - "acc,exam_id__2015-18": 0.425, - "acc,exam_id__2013-11": 0.4125, - "acc,exam_id__2010-02": 0.4, - "acc,exam_id__2011-04": 0.3875, - "acc,exam_id__2014-15": 0.44871794871794873, - "acc,exam_id__2018-25": 0.4, - "acc,exam_id__2015-17": 0.47435897435897434, - "acc,exam_id__2011-03": 0.36363636363636365, - "acc,exam_id__2016-21": 0.3875, - "acc,exam_id__2013-12": 0.475, - "acc,exam_id__2016-20": 0.3625, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6308975342496719, - "acc,all": 0.6392479435957696 - }, - "tweetsentbr": { - "f1_macro,all": 0.4950827242707184, - "acc,all": 0.6895522388059702, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.8520896848392872, + "acc,all": 0.8537581699346405, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7580664670996959, + "mse,all": 0.5798937908496731, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.45897079276773295, + "acc,exam_id__USP_2023": 0.5681818181818182, + "acc,exam_id__USP_2020": 0.375, + "acc,exam_id__UNICAMP_2023": 0.4418604651162791, + "acc,exam_id__USP_2018": 0.4074074074074074, + "acc,exam_id__UNICAMP_2022": 0.5897435897435898, + "acc,exam_id__USP_2024": 0.6585365853658537, + "acc,exam_id__UNICAMP_2024": 0.4666666666666667, + "acc,exam_id__USP_2019": 0.4, + "acc,exam_id__USP_2022": 0.4489795918367347, + "acc,exam_id__UNICAMP_2018": 0.35185185185185186, + "acc,exam_id__USP_2021": 0.36538461538461536, + "acc,exam_id__UNICAMP_2021_2": 0.43137254901960786, + "acc,exam_id__UNICAMP_2021_1": 0.41304347826086957, + "acc,exam_id__UNICAMP_2020": 0.4727272727272727, + "acc,exam_id__UNICAMP_2019": 0.58, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.5990202939118264, + "acc,exam_id__2022": 0.6466165413533834, + "acc,exam_id__2012": 0.5603448275862069, + "acc,exam_id__2010": 0.5897435897435898, + "acc,exam_id__2013": 0.5925925925925926, + "acc,exam_id__2009": 0.6173913043478261, + "acc,exam_id__2023": 0.5925925925925926, + "acc,exam_id__2017": 0.5172413793103449, + "acc,exam_id__2016": 0.5537190082644629, + "acc,exam_id__2015": 0.5546218487394958, + "acc,exam_id__2014": 0.6513761467889908, + "acc,exam_id__2016_2": 0.6097560975609756, + "acc,exam_id__2011": 0.7008547008547008 + }, + "faquad_nli": { + "f1_macro,all": 0.7632014424026872, + "acc,all": 0.8184615384615385, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8462835603587262, + "acc,all": 0.8464285714285714 + }, + "oab_exams": { + "acc,all": 0.4, + "acc,exam_id__2012-07": 0.3875, + "acc,exam_id__2013-10": 0.375, + "acc,exam_id__2016-20a": 0.3625, + "acc,exam_id__2017-24": 0.4125, + "acc,exam_id__2011-05": 0.4625, + "acc,exam_id__2014-13": 0.3625, + "acc,exam_id__2017-22": 0.4875, + "acc,exam_id__2012-06": 0.325, + "acc,exam_id__2017-23": 0.4, + "acc,exam_id__2016-19": 0.44871794871794873, + "acc,exam_id__2012-06a": 0.4125, + "acc,exam_id__2014-14": 0.3875, + "acc,exam_id__2012-08": 0.375, + "acc,exam_id__2010-01": 0.35294117647058826, + "acc,exam_id__2015-16": 0.375, + "acc,exam_id__2012-09": 0.35064935064935066, + "acc,exam_id__2015-18": 0.425, + "acc,exam_id__2013-11": 0.4125, + "acc,exam_id__2010-02": 0.4, + "acc,exam_id__2011-04": 0.3875, + "acc,exam_id__2014-15": 0.44871794871794873, + "acc,exam_id__2018-25": 0.4, + 
"acc,exam_id__2015-17": 0.47435897435897434, + "acc,exam_id__2011-03": 0.36363636363636365, + "acc,exam_id__2016-21": 0.3875, + "acc,exam_id__2013-12": 0.475, + "acc,exam_id__2016-20": 0.3625, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6308975342496719, + "acc,all": 0.6392479435957696 + }, + "tweetsentbr": { + "f1_macro,all": 0.6601102990276245, + "acc,all": 0.6895522388059702, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "de16c0994d14f18a59fd41f1fb6cc1bc96aa1663", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 1620.039188243527, 
- "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "de16c0994d14f18a59fd41f1fb6cc1bc96aa1663", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=hkust-nlp/deita-7b-v1.0,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=hkust-nlp/deita-7b-v1.0,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "51e0e5e" + 
"git_hash": "51e0e5e" } \ No newline at end of file diff --git a/hkust-nlp/deita-7b-v1.0/results_2024-05-20T23-09-35.076966.json b/hkust-nlp/deita-7b-v1.0/results_2024-05-20T23-09-35.076966.json index e1a71f4dbd0160a8902087a806da3703b982730c..758284098753cbb77c7ed5f07153c9df9a3eae12 100644 --- a/hkust-nlp/deita-7b-v1.0/results_2024-05-20T23-09-35.076966.json +++ b/hkust-nlp/deita-7b-v1.0/results_2024-05-20T23-09-35.076966.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6448458333222606, - "all_grouped_npm": 0.47337551466198996, + "all_grouped_average": 0.6631822305174725, + "all_grouped_npm": 0.5006618200120074, "all_grouped": { "enem_challenge": 0.5990202939118264, "bluex": 0.45897079276773295, @@ -45,7 +45,7 @@ "faquad_nli": 0.7632014424026872, "hatebr_offensive": 0.8462835603587262, "portuguese_hate_speech": 0.6308975342496719, - "tweetsentbr": 0.4950827242707184 + "tweetsentbr": 0.6601102990276245 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.5990202939118264, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7632014424026872, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8462835603587262, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6308975342496719, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4950827242707184 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6601102990276245 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.5990202939118264, @@ -150,9 +150,9 @@ "main_score": 0.6308975342496719 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4950827242707184, + "f1_macro,all": 0.6601102990276245, "acc,all": 0.6895522388059702, - "main_score": 0.4950827242707184 + "main_score": 0.6601102990276245 } }, "config_tasks": { diff --git a/ibivibiv/llama-3-nectar-dpo-8B/raw_2024-06-22T07-38-35.422279/results.json b/ibivibiv/llama-3-nectar-dpo-8B/raw_2024-06-22T07-38-35.422279/results.json index dbc542989532475c8e812fa217f925260e6a006f..422f9b02acd7f50042e86dcb8cb4342637c2a5bf 100644 --- a/ibivibiv/llama-3-nectar-dpo-8B/raw_2024-06-22T07-38-35.422279/results.json +++ b/ibivibiv/llama-3-nectar-dpo-8B/raw_2024-06-22T07-38-35.422279/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9082379542568665, - "acc,all": 0.9084967320261438, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7521023848238035, - "mse,all": 0.6005228758169935, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5757997218358831, - "acc,exam_id__USP_2022": 0.6122448979591837, - "acc,exam_id__UNICAMP_2018": 0.48148148148148145, - "acc,exam_id__USP_2021": 0.5769230769230769, - "acc,exam_id__UNICAMP_2024": 0.6444444444444445, - "acc,exam_id__USP_2018": 0.4444444444444444, - "acc,exam_id__USP_2024": 0.6829268292682927, - "acc,exam_id__USP_2019": 0.6, - "acc,exam_id__UNICAMP_2019": 0.54, - "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, - "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373, - "acc,exam_id__UNICAMP_2023": 0.6046511627906976, - "acc,exam_id__UNICAMP_2020": 0.5818181818181818, - "acc,exam_id__USP_2023": 0.7045454545454546, - "acc,exam_id__USP_2020": 0.5535714285714286, - "acc,exam_id__UNICAMP_2022": 0.6153846153846154, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.7039888033589923, - "acc,exam_id__2017": 0.6810344827586207, - "acc,exam_id__2012": 0.7068965517241379, - "acc,exam_id__2022": 0.6691729323308271, - "acc,exam_id__2016": 0.6776859504132231, - "acc,exam_id__2010": 0.717948717948718, 
- "acc,exam_id__2011": 0.7008547008547008, - "acc,exam_id__2014": 0.7064220183486238, - "acc,exam_id__2013": 0.6944444444444444, - "acc,exam_id__2016_2": 0.6829268292682927, - "acc,exam_id__2015": 0.7226890756302521, - "acc,exam_id__2009": 0.7130434782608696, - "acc,exam_id__2023": 0.7703703703703704 - }, - "faquad_nli": { - "f1_macro,all": 0.751649303344456, - "acc,all": 0.7923076923076923, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8684745291939572, - "acc,all": 0.8685714285714285 - }, - "oab_exams": { - "acc,all": 0.5056947608200456, - "acc,exam_id__2013-11": 0.55, - "acc,exam_id__2016-21": 0.3625, - "acc,exam_id__2012-06": 0.55, - "acc,exam_id__2013-10": 0.4875, - "acc,exam_id__2012-09": 0.4155844155844156, - "acc,exam_id__2012-08": 0.55, - "acc,exam_id__2015-18": 0.4875, - "acc,exam_id__2017-24": 0.4375, - "acc,exam_id__2017-22": 0.5875, - "acc,exam_id__2016-19": 0.5641025641025641, - "acc,exam_id__2010-02": 0.55, - "acc,exam_id__2012-07": 0.525, - "acc,exam_id__2017-23": 0.45, - "acc,exam_id__2014-15": 0.5512820512820513, - "acc,exam_id__2011-05": 0.4375, - "acc,exam_id__2015-16": 0.525, - "acc,exam_id__2016-20a": 0.425, - "acc,exam_id__2014-14": 0.6, - "acc,exam_id__2018-25": 0.5, - "acc,exam_id__2010-01": 0.38823529411764707, - "acc,exam_id__2014-13": 0.4375, - "acc,exam_id__2013-12": 0.5375, - "acc,exam_id__2016-20": 0.55, - "acc,exam_id__2012-06a": 0.575, - "acc,exam_id__2011-03": 0.4444444444444444, - "acc,exam_id__2011-04": 0.5125, - "acc,exam_id__2015-17": 0.6666666666666666, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.5970911688484284, - "acc,all": 0.5981198589894242 - }, - "tweetsentbr": { - "f1_macro,all": 0.49024741231838487, - "acc,all": 0.7179104477611941, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9082379542568665, + "acc,all": 0.9084967320261438, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7521023848238035, + "mse,all": 0.6005228758169935, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5757997218358831, + "acc,exam_id__USP_2022": 0.6122448979591837, + "acc,exam_id__UNICAMP_2018": 0.48148148148148145, + "acc,exam_id__USP_2021": 0.5769230769230769, + "acc,exam_id__UNICAMP_2024": 0.6444444444444445, + "acc,exam_id__USP_2018": 0.4444444444444444, + "acc,exam_id__USP_2024": 0.6829268292682927, + "acc,exam_id__USP_2019": 0.6, + "acc,exam_id__UNICAMP_2019": 0.54, + "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, + "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373, + "acc,exam_id__UNICAMP_2023": 0.6046511627906976, + "acc,exam_id__UNICAMP_2020": 0.5818181818181818, + "acc,exam_id__USP_2023": 0.7045454545454546, + "acc,exam_id__USP_2020": 0.5535714285714286, + "acc,exam_id__UNICAMP_2022": 0.6153846153846154, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.7039888033589923, + "acc,exam_id__2017": 0.6810344827586207, + "acc,exam_id__2012": 0.7068965517241379, + "acc,exam_id__2022": 0.6691729323308271, + "acc,exam_id__2016": 0.6776859504132231, + "acc,exam_id__2010": 0.717948717948718, + "acc,exam_id__2011": 0.7008547008547008, + "acc,exam_id__2014": 0.7064220183486238, + "acc,exam_id__2013": 0.6944444444444444, + "acc,exam_id__2016_2": 0.6829268292682927, + "acc,exam_id__2015": 0.7226890756302521, + "acc,exam_id__2009": 0.7130434782608696, + "acc,exam_id__2023": 0.7703703703703704 + }, + "faquad_nli": { + "f1_macro,all": 0.751649303344456, + "acc,all": 0.7923076923076923, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8684745291939572, + "acc,all": 0.8685714285714285 + }, + "oab_exams": { + "acc,all": 0.5056947608200456, + "acc,exam_id__2013-11": 0.55, + "acc,exam_id__2016-21": 0.3625, + "acc,exam_id__2012-06": 0.55, + "acc,exam_id__2013-10": 0.4875, + "acc,exam_id__2012-09": 0.4155844155844156, + "acc,exam_id__2012-08": 0.55, + "acc,exam_id__2015-18": 0.4875, + "acc,exam_id__2017-24": 0.4375, + "acc,exam_id__2017-22": 0.5875, + "acc,exam_id__2016-19": 0.5641025641025641, + "acc,exam_id__2010-02": 0.55, + "acc,exam_id__2012-07": 0.525, + "acc,exam_id__2017-23": 0.45, + "acc,exam_id__2014-15": 0.5512820512820513, + "acc,exam_id__2011-05": 0.4375, + "acc,exam_id__2015-16": 0.525, + "acc,exam_id__2016-20a": 0.425, + "acc,exam_id__2014-14": 0.6, + "acc,exam_id__2018-25": 0.5, + "acc,exam_id__2010-01": 0.38823529411764707, + "acc,exam_id__2014-13": 0.4375, + "acc,exam_id__2013-12": 
0.5375, + "acc,exam_id__2016-20": 0.55, + "acc,exam_id__2012-06a": 0.575, + "acc,exam_id__2011-03": 0.4444444444444444, + "acc,exam_id__2011-04": 0.5125, + "acc,exam_id__2015-17": 0.6666666666666666, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.5970911688484284, + "acc,all": 0.5981198589894242 + }, + "tweetsentbr": { + "f1_macro,all": 0.6536632164245132, + "acc,all": 0.7179104477611941, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "55b6eda126756b92fe5cbd09ca2a8ce245e4f491", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 16060530688, - "model_num_parameters": 8030261248, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1318.5322712418301, - "min_seq_length": 1299, - "max_seq_length": 1382, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1509.5322712418301, - "min_seq_length": 1490, - "max_seq_length": 1573, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1484.7719054242002, - "min_seq_length": 1165, - "max_seq_length": 2134, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1412.3547935619315, - "min_seq_length": 1187, - "max_seq_length": 2340, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1447.8215384615385, - "min_seq_length": 1402, - "max_seq_length": 1544, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "55b6eda126756b92fe5cbd09ca2a8ce245e4f491", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 16060530688, + "model_num_parameters": 8030261248, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1279.3878571428572, - "min_seq_length": 1259, - "max_seq_length": 1498, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1220.3772209567198, - "min_seq_length": 988, - "max_seq_length": 1654, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1318.5322712418301, + "min_seq_length": 1299, + "max_seq_length": 1382, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1509.5322712418301, + "min_seq_length": 1490, + "max_seq_length": 1573, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1484.7719054242002, + "min_seq_length": 1165, + "max_seq_length": 2134, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1412.3547935619315, + "min_seq_length": 1187, + "max_seq_length": 2340, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1447.8215384615385, + "min_seq_length": 1402, + "max_seq_length": 1544, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1279.3878571428572, + "min_seq_length": 1259, + "max_seq_length": 1498, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1220.3772209567198, + "min_seq_length": 988, + "max_seq_length": 1654, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1676.4195064629848, + "min_seq_length": 1646, + "max_seq_length": 1708, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1537.1537313432837, + "min_seq_length": 1520, + "max_seq_length": 1585, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1676.4195064629848, - "min_seq_length": 1646, - "max_seq_length": 1708, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=ibivibiv/llama-3-nectar-dpo-8B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1537.1537313432837, - "min_seq_length": 1520, - "max_seq_length": 1585, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=ibivibiv/llama-3-nectar-dpo-8B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - 
"git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/ibivibiv/llama-3-nectar-dpo-8B/results_2024-06-22T07-38-35.422279.json b/ibivibiv/llama-3-nectar-dpo-8B/results_2024-06-22T07-38-35.422279.json index 13fa6c5e35049bd76d47851a864da7c953b1103c..680584f0f6417bbf94a17d5630a8aca692b8513f 100644 --- a/ibivibiv/llama-3-nectar-dpo-8B/results_2024-06-22T07-38-35.422279.json +++ b/ibivibiv/llama-3-nectar-dpo-8B/results_2024-06-22T07-38-35.422279.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6836984487556463, - "all_grouped_npm": 0.5267399559990615, + "all_grouped_average": 0.7018557603229939, + "all_grouped_npm": 0.5537597648790431, "all_grouped": { "enem_challenge": 0.7039888033589923, "bluex": 0.5757997218358831, @@ -45,7 +45,7 @@ "faquad_nli": 0.751649303344456, "hatebr_offensive": 0.8684745291939572, "portuguese_hate_speech": 0.5970911688484284, - "tweetsentbr": 0.49024741231838487 + "tweetsentbr": 0.6536632164245132 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.7039888033589923, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.751649303344456, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8684745291939572, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.5970911688484284, - "harness|tweetsentbr|tweetsentbr|None|25": 0.49024741231838487 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6536632164245132 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.7039888033589923, @@ -150,9 +150,9 @@ "main_score": 0.5970911688484284 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.49024741231838487, + "f1_macro,all": 0.6536632164245132, "acc,all": 0.7179104477611941, - "main_score": 0.49024741231838487 + "main_score": 0.6536632164245132 } }, "config_tasks": { diff --git a/ibivibiv/multimaster-7b-v6/raw_2024-06-14T13-39-45.160271/results.json b/ibivibiv/multimaster-7b-v6/raw_2024-06-14T13-39-45.160271/results.json index a496b0eac4b260e1a6c36d04ed07b4856d43d04f..ac61093cd3b93fff960503bdc2881a9a5f3b099f 100644 --- a/ibivibiv/multimaster-7b-v6/raw_2024-06-14T13-39-45.160271/results.json +++ b/ibivibiv/multimaster-7b-v6/raw_2024-06-14T13-39-45.160271/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9219507504908876, - "acc,all": 0.9219771241830066, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.775788909453694, - "mse,all": 0.4446486928104575, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5479833101529903, - "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, - "acc,exam_id__USP_2019": 0.425, - "acc,exam_id__USP_2023": 0.6363636363636364, - "acc,exam_id__UNICAMP_2018": 0.5185185185185185, - "acc,exam_id__UNICAMP_2020": 0.6, - "acc,exam_id__USP_2018": 0.46296296296296297, - "acc,exam_id__UNICAMP_2023": 0.627906976744186, - "acc,exam_id__UNICAMP_2022": 0.5641025641025641, - "acc,exam_id__UNICAMP_2019": 0.58, - "acc,exam_id__USP_2021": 0.5192307692307693, - "acc,exam_id__USP_2024": 0.7317073170731707, - "acc,exam_id__USP_2020": 0.48214285714285715, - "acc,exam_id__USP_2022": 0.5102040816326531, - "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, - "acc,exam_id__UNICAMP_2024": 0.4666666666666667, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6445066480055983, - "acc,exam_id__2014": 0.6330275229357798, - "acc,exam_id__2023": 0.6888888888888889, - "acc,exam_id__2011": 0.7008547008547008, - "acc,exam_id__2010": 0.6923076923076923, - 
"acc,exam_id__2017": 0.6551724137931034, - "acc,exam_id__2022": 0.5939849624060151, - "acc,exam_id__2009": 0.6173913043478261, - "acc,exam_id__2015": 0.5882352941176471, - "acc,exam_id__2013": 0.7037037037037037, - "acc,exam_id__2016": 0.6115702479338843, - "acc,exam_id__2012": 0.6379310344827587, - "acc,exam_id__2016_2": 0.6178861788617886 - }, - "faquad_nli": { - "f1_macro,all": 0.7186147186147187, - "acc,all": 0.7538461538461538, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.867516403157304, - "acc,all": 0.8678571428571429 - }, - "oab_exams": { - "acc,all": 0.42642369020501136, - "acc,exam_id__2015-18": 0.4375, - "acc,exam_id__2011-04": 0.425, - "acc,exam_id__2014-13": 0.3125, - "acc,exam_id__2013-10": 0.4375, - "acc,exam_id__2013-12": 0.425, - "acc,exam_id__2012-06a": 0.4, - "acc,exam_id__2016-20": 0.425, - "acc,exam_id__2010-02": 0.45, - "acc,exam_id__2013-11": 0.45, - "acc,exam_id__2016-21": 0.45, - "acc,exam_id__2012-07": 0.3875, - "acc,exam_id__2016-20a": 0.3375, - "acc,exam_id__2010-01": 0.3411764705882353, - "acc,exam_id__2016-19": 0.5128205128205128, - "acc,exam_id__2017-23": 0.45, - "acc,exam_id__2012-06": 0.45, - "acc,exam_id__2012-09": 0.37662337662337664, - "acc,exam_id__2012-08": 0.3875, - "acc,exam_id__2011-03": 0.35353535353535354, - "acc,exam_id__2015-16": 0.4, - "acc,exam_id__2017-24": 0.3625, - "acc,exam_id__2015-17": 0.5384615384615384, - "acc,exam_id__2014-14": 0.525, - "acc,exam_id__2018-25": 0.4625, - "acc,exam_id__2014-15": 0.47435897435897434, - "acc,exam_id__2017-22": 0.525, - "acc,exam_id__2011-05": 0.4375, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.661509185600641, - "acc,all": 0.6733254994124559 - }, - "tweetsentbr": { - "f1_macro,all": 0.4976233004346325, - "acc,all": 0.7119402985074627, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9219507504908876, + "acc,all": 0.9219771241830066, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.775788909453694, + "mse,all": 0.4446486928104575, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5479833101529903, + "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, + "acc,exam_id__USP_2019": 0.425, + "acc,exam_id__USP_2023": 0.6363636363636364, + "acc,exam_id__UNICAMP_2018": 0.5185185185185185, + "acc,exam_id__UNICAMP_2020": 0.6, + "acc,exam_id__USP_2018": 0.46296296296296297, + "acc,exam_id__UNICAMP_2023": 0.627906976744186, + "acc,exam_id__UNICAMP_2022": 0.5641025641025641, + "acc,exam_id__UNICAMP_2019": 0.58, + "acc,exam_id__USP_2021": 0.5192307692307693, + "acc,exam_id__USP_2024": 0.7317073170731707, + "acc,exam_id__USP_2020": 0.48214285714285715, + "acc,exam_id__USP_2022": 0.5102040816326531, + "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, + "acc,exam_id__UNICAMP_2024": 0.4666666666666667, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6445066480055983, + "acc,exam_id__2014": 0.6330275229357798, + "acc,exam_id__2023": 0.6888888888888889, + "acc,exam_id__2011": 0.7008547008547008, + "acc,exam_id__2010": 0.6923076923076923, + "acc,exam_id__2017": 0.6551724137931034, + "acc,exam_id__2022": 0.5939849624060151, + "acc,exam_id__2009": 0.6173913043478261, + "acc,exam_id__2015": 0.5882352941176471, + "acc,exam_id__2013": 0.7037037037037037, + "acc,exam_id__2016": 0.6115702479338843, + "acc,exam_id__2012": 0.6379310344827587, + "acc,exam_id__2016_2": 0.6178861788617886 + }, + "faquad_nli": { + "f1_macro,all": 0.7186147186147187, + "acc,all": 0.7538461538461538, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.867516403157304, + "acc,all": 0.8678571428571429 + }, + "oab_exams": { + "acc,all": 0.42642369020501136, + "acc,exam_id__2015-18": 0.4375, + "acc,exam_id__2011-04": 0.425, + "acc,exam_id__2014-13": 0.3125, + "acc,exam_id__2013-10": 0.4375, + "acc,exam_id__2013-12": 0.425, + "acc,exam_id__2012-06a": 0.4, + "acc,exam_id__2016-20": 0.425, + "acc,exam_id__2010-02": 0.45, + "acc,exam_id__2013-11": 0.45, + "acc,exam_id__2016-21": 0.45, + "acc,exam_id__2012-07": 0.3875, + "acc,exam_id__2016-20a": 0.3375, + "acc,exam_id__2010-01": 0.3411764705882353, + "acc,exam_id__2016-19": 0.5128205128205128, + "acc,exam_id__2017-23": 0.45, + "acc,exam_id__2012-06": 0.45, + "acc,exam_id__2012-09": 0.37662337662337664, + "acc,exam_id__2012-08": 0.3875, + "acc,exam_id__2011-03": 0.35353535353535354, + "acc,exam_id__2015-16": 0.4, + "acc,exam_id__2017-24": 0.3625, + "acc,exam_id__2015-17": 
0.5384615384615384, + "acc,exam_id__2014-14": 0.525, + "acc,exam_id__2018-25": 0.4625, + "acc,exam_id__2014-15": 0.47435897435897434, + "acc,exam_id__2017-22": 0.525, + "acc,exam_id__2011-05": 0.4375, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.661509185600641, + "acc,all": 0.6733254994124559 + }, + "tweetsentbr": { + "f1_macro,all": 0.6634977339128433, + "acc,all": 0.7119402985074627, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "7b3bfecb654c86565c65cd510dd1138cb3e75087", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 71393361920, - "model_num_parameters": 35428241408, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 1620.039188243527, 
- "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "7b3bfecb654c86565c65cd510dd1138cb3e75087", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 71393361920, + "model_num_parameters": 35428241408, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=ibivibiv/multimaster-7b-v6,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=ibivibiv/multimaster-7b-v6,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "f2a0116" 
+ "git_hash": "f2a0116" } \ No newline at end of file diff --git a/ibivibiv/multimaster-7b-v6/results_2024-06-14T13-39-45.160271.json b/ibivibiv/multimaster-7b-v6/results_2024-06-14T13-39-45.160271.json index 1edab49a0bb1fb9cde04741d23ac7dbddefd2e20..e451849e97643cb704864ff9debc0bf0f60313f9 100644 --- a/ibivibiv/multimaster-7b-v6/results_2024-06-14T13-39-45.160271.json +++ b/ibivibiv/multimaster-7b-v6/results_2024-06-14T13-39-45.160271.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.673546324012831, - "all_grouped_npm": 0.5164232867443359, + "all_grouped_average": 0.691976816621521, + "all_grouped_npm": 0.543849615031077, "all_grouped": { "enem_challenge": 0.6445066480055983, "bluex": 0.5479833101529903, @@ -45,7 +45,7 @@ "faquad_nli": 0.7186147186147187, "hatebr_offensive": 0.867516403157304, "portuguese_hate_speech": 0.661509185600641, - "tweetsentbr": 0.4976233004346325 + "tweetsentbr": 0.6634977339128433 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6445066480055983, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7186147186147187, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.867516403157304, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.661509185600641, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4976233004346325 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6634977339128433 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6445066480055983, @@ -150,9 +150,9 @@ "main_score": 0.661509185600641 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4976233004346325, + "f1_macro,all": 0.6634977339128433, "acc,all": 0.7119402985074627, - "main_score": 0.4976233004346325 + "main_score": 0.6634977339128433 } }, "config_tasks": { diff --git a/internlm/internlm2-chat-20b-sft/raw_2024-04-22T06-09-22.928789/results.json b/internlm/internlm2-chat-20b-sft/raw_2024-04-22T06-09-22.928789/results.json index 441bac75ba8cd6a656ea35e32004b2e689937950..969c50c4b84ce18bee021c121e008a38e50fd9c8 100644 --- a/internlm/internlm2-chat-20b-sft/raw_2024-04-22T06-09-22.928789/results.json +++ b/internlm/internlm2-chat-20b-sft/raw_2024-04-22T06-09-22.928789/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.5939675997062311, - "acc,all": 0.8905228758169934, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.8010479909085628, - "mse,all": 0.39991048202614377, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5605006954102921, - "acc,exam_id__UNICAMP_2022": 0.5384615384615384, - "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, - "acc,exam_id__USP_2019": 0.575, - "acc,exam_id__UNICAMP_2020": 0.5636363636363636, - "acc,exam_id__USP_2018": 0.48148148148148145, - "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, - "acc,exam_id__UNICAMP_2023": 0.6744186046511628, - "acc,exam_id__USP_2020": 0.5178571428571429, - "acc,exam_id__UNICAMP_2024": 0.4666666666666667, - "acc,exam_id__USP_2021": 0.5192307692307693, - "acc,exam_id__USP_2023": 0.5909090909090909, - "acc,exam_id__UNICAMP_2018": 0.6111111111111112, - "acc,exam_id__USP_2022": 0.6326530612244898, - "acc,exam_id__USP_2024": 0.6829268292682927, - "acc,exam_id__UNICAMP_2019": 0.46, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6508047585724283, - "acc,exam_id__2016": 0.6528925619834711, - "acc,exam_id__2022": 0.6240601503759399, - "acc,exam_id__2023": 0.674074074074074, - "acc,exam_id__2009": 0.7043478260869566, - "acc,exam_id__2012": 
0.6896551724137931, - "acc,exam_id__2016_2": 0.6666666666666666, - "acc,exam_id__2010": 0.6324786324786325, - "acc,exam_id__2013": 0.5740740740740741, - "acc,exam_id__2011": 0.6923076923076923, - "acc,exam_id__2014": 0.6330275229357798, - "acc,exam_id__2017": 0.6637931034482759, - "acc,exam_id__2015": 0.5966386554621849 - }, - "faquad_nli": { - "f1_macro,all": 0.3792037617039467, - "acc,all": 0.7261538461538461, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.6817472664224469, - "acc,all": 0.7078571428571429 - }, - "oab_exams": { - "acc,all": 0.43416856492027334, - "acc,exam_id__2015-16": 0.425, - "acc,exam_id__2015-18": 0.4625, - "acc,exam_id__2016-20": 0.4375, - "acc,exam_id__2015-17": 0.5256410256410257, - "acc,exam_id__2013-10": 0.4625, - "acc,exam_id__2011-03": 0.3434343434343434, - "acc,exam_id__2016-21": 0.3625, - "acc,exam_id__2012-06": 0.4, - "acc,exam_id__2011-04": 0.375, - "acc,exam_id__2013-11": 0.4, - "acc,exam_id__2016-19": 0.46153846153846156, - "acc,exam_id__2017-24": 0.5125, - "acc,exam_id__2017-23": 0.3875, - "acc,exam_id__2017-22": 0.5375, - "acc,exam_id__2012-06a": 0.5, - "acc,exam_id__2010-02": 0.48, - "acc,exam_id__2013-12": 0.4375, - "acc,exam_id__2014-15": 0.5256410256410257, - "acc,exam_id__2012-08": 0.3625, - "acc,exam_id__2012-09": 0.38961038961038963, - "acc,exam_id__2016-20a": 0.3875, - "acc,exam_id__2014-13": 0.425, - "acc,exam_id__2018-25": 0.475, - "acc,exam_id__2014-14": 0.4375, - "acc,exam_id__2010-01": 0.3764705882352941, - "acc,exam_id__2011-05": 0.4375, - "acc,exam_id__2012-07": 0.4125, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6747511744565651, - "acc,all": 0.7743830787309048 - }, - "tweetsentbr": { - "f1_macro,all": 0.5653399164195619, - "acc,all": 0.5875621890547263, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.8909513995593468, + "acc,all": 0.8905228758169934, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.8010479909085628, + "mse,all": 0.39991048202614377, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5605006954102921, + "acc,exam_id__UNICAMP_2022": 0.5384615384615384, + "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, + "acc,exam_id__USP_2019": 0.575, + "acc,exam_id__UNICAMP_2020": 0.5636363636363636, + "acc,exam_id__USP_2018": 0.48148148148148145, + "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, + "acc,exam_id__UNICAMP_2023": 0.6744186046511628, + "acc,exam_id__USP_2020": 0.5178571428571429, + "acc,exam_id__UNICAMP_2024": 0.4666666666666667, + "acc,exam_id__USP_2021": 0.5192307692307693, + "acc,exam_id__USP_2023": 0.5909090909090909, + "acc,exam_id__UNICAMP_2018": 0.6111111111111112, + "acc,exam_id__USP_2022": 0.6326530612244898, + "acc,exam_id__USP_2024": 0.6829268292682927, + "acc,exam_id__UNICAMP_2019": 0.46, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6508047585724283, + "acc,exam_id__2016": 0.6528925619834711, + "acc,exam_id__2022": 0.6240601503759399, + "acc,exam_id__2023": 0.674074074074074, + "acc,exam_id__2009": 0.7043478260869566, + "acc,exam_id__2012": 0.6896551724137931, + "acc,exam_id__2016_2": 0.6666666666666666, + "acc,exam_id__2010": 0.6324786324786325, + "acc,exam_id__2013": 0.5740740740740741, + "acc,exam_id__2011": 0.6923076923076923, + "acc,exam_id__2014": 0.6330275229357798, + "acc,exam_id__2017": 0.6637931034482759, + "acc,exam_id__2015": 0.5966386554621849 + }, + "faquad_nli": { + "f1_macro,all": 0.56880564255592, + "acc,all": 0.7261538461538461, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.6817472664224469, + "acc,all": 0.7078571428571429 + }, + "oab_exams": { + "acc,all": 0.43416856492027334, + "acc,exam_id__2015-16": 0.425, + "acc,exam_id__2015-18": 0.4625, + "acc,exam_id__2016-20": 0.4375, + "acc,exam_id__2015-17": 0.5256410256410257, + "acc,exam_id__2013-10": 0.4625, + "acc,exam_id__2011-03": 0.3434343434343434, + "acc,exam_id__2016-21": 0.3625, + "acc,exam_id__2012-06": 0.4, + "acc,exam_id__2011-04": 0.375, + "acc,exam_id__2013-11": 0.4, + "acc,exam_id__2016-19": 0.46153846153846156, + "acc,exam_id__2017-24": 0.5125, + "acc,exam_id__2017-23": 0.3875, + "acc,exam_id__2017-22": 0.5375, + "acc,exam_id__2012-06a": 0.5, + "acc,exam_id__2010-02": 0.48, + "acc,exam_id__2013-12": 0.4375, + "acc,exam_id__2014-15": 0.5256410256410257, + "acc,exam_id__2012-08": 0.3625, + "acc,exam_id__2012-09": 0.38961038961038963, + "acc,exam_id__2016-20a": 0.3875, + 
"acc,exam_id__2014-13": 0.425, + "acc,exam_id__2018-25": 0.475, + "acc,exam_id__2014-14": 0.4375, + "acc,exam_id__2010-01": 0.3764705882352941, + "acc,exam_id__2011-05": 0.4375, + "acc,exam_id__2012-07": 0.4125, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6747511744565651, + "acc,all": 0.7743830787309048 + }, + "tweetsentbr": { + "f1_macro,all": 0.5653399164195619, + "acc,all": 0.5875621890547263, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "71f7e1581c61865f06c4d0ea877013ac4dda1c19", - "model_dtype": "torch.float16", - "model_memory_footprint": 40527618048, - "model_num_parameters": 19861149696, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 4, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1536.9162581699347, - "min_seq_length": 1513, - "max_seq_length": 1614, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1798.9162581699347, - "min_seq_length": 1775, - "max_seq_length": 1876, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1724.4075104311544, - "min_seq_length": 1349, - "max_seq_length": 2502, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 
1681.2841147655704, - "min_seq_length": 1411, - "max_seq_length": 2606, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1753.9215384615384, - "min_seq_length": 1700, - "max_seq_length": 1892, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "71f7e1581c61865f06c4d0ea877013ac4dda1c19", + "model_dtype": "torch.float16", + "model_memory_footprint": 40527618048, + "model_num_parameters": 19861149696, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 4, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1479.1128571428571, - "min_seq_length": 1454, - "max_seq_length": 1739, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1463.90569476082, - "min_seq_length": 1182, - "max_seq_length": 1977, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1536.9162581699347, + "min_seq_length": 1513, + "max_seq_length": 1614, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1798.9162581699347, + "min_seq_length": 1775, + "max_seq_length": 1876, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1724.4075104311544, + "min_seq_length": 1349, + "max_seq_length": 2502, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1681.2841147655704, + "min_seq_length": 1411, + "max_seq_length": 2606, + "max_ctx_length": 2528, 
+ "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1753.9215384615384, + "min_seq_length": 1700, + "max_seq_length": 1892, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1479.1128571428571, + "min_seq_length": 1454, + "max_seq_length": 1739, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1463.90569476082, + "min_seq_length": 1182, + "max_seq_length": 1977, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1938.6909518213865, + "min_seq_length": 1904, + "max_seq_length": 1973, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1710.6412935323383, + "min_seq_length": 1689, + "max_seq_length": 1823, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1938.6909518213865, - "min_seq_length": 1904, - "max_seq_length": 1973, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=internlm/internlm2-chat-20b-sft,dtype=float16,device=cuda:0,revision=v1.0.0,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1710.6412935323383, - "min_seq_length": 1689, - "max_seq_length": 1823, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=internlm/internlm2-chat-20b-sft,dtype=float16,device=cuda:0,revision=v1.0.0,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": 
null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/internlm/internlm2-chat-20b-sft/results_2024-04-22T06-09-22.928789.json b/internlm/internlm2-chat-20b-sft/results_2024-04-22T06-09-22.928789.json index 7d4d7b61184ec91068c3b251bf47d2497db56830..b7846d61c26ffc53f5f15952fb6297b591ebb1ca 100644 --- a/internlm/internlm2-chat-20b-sft/results_2024-04-22T06-09-22.928789.json +++ b/internlm/internlm2-chat-20b-sft/results_2024-04-22T06-09-22.928789.json @@ -34,15 +34,15 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.5935035253911454, - "all_grouped_npm": 0.35357586096704713, + "all_grouped_average": 0.6475686010250441, + "all_grouped_npm": 0.45829813529142693, "all_grouped": { "enem_challenge": 0.6508047585724283, "bluex": 0.5605006954102921, "oab_exams": 0.43416856492027334, - "assin2_rte": 0.5939675997062311, + "assin2_rte": 0.8909513995593468, "assin2_sts": 0.8010479909085628, - "faquad_nli": 0.3792037617039467, + "faquad_nli": 0.56880564255592, "hatebr_offensive": 0.6817472664224469, "portuguese_hate_speech": 0.6747511744565651, "tweetsentbr": 0.5653399164195619 @@ -51,9 +51,9 @@ "harness|enem_challenge|enem_challenge|None|3": 0.6508047585724283, "harness|bluex|bluex|None|3": 0.5605006954102921, "harness|oab_exams|oab_exams|None|3": 0.43416856492027334, - "harness|assin2_rte|assin2_rte|None|15": 0.5939675997062311, + "harness|assin2_rte|assin2_rte|None|15": 0.8909513995593468, "harness|assin2_sts|assin2_sts|None|15": 0.8010479909085628, - "harness|faquad_nli|faquad_nli|None|15": 0.3792037617039467, + "harness|faquad_nli|faquad_nli|None|15": 0.56880564255592, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.6817472664224469, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6747511744565651, "harness|tweetsentbr|tweetsentbr|None|25": 0.5653399164195619 @@ -125,9 +125,9 @@ "main_score": 0.43416856492027334 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.5939675997062311, + "f1_macro,all": 0.8909513995593468, "acc,all": 0.8905228758169934, - "main_score": 0.5939675997062311 + "main_score": 0.8909513995593468 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.8010479909085628, @@ -135,9 +135,9 @@ "main_score": 0.8010479909085628 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.3792037617039467, + "f1_macro,all": 0.56880564255592, "acc,all": 0.7261538461538461, - "main_score": 0.3792037617039467 + "main_score": 0.56880564255592 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { "f1_macro,all": 0.6817472664224469, diff --git a/internlm/internlm2-chat-20b/raw_2024-02-24T08-23-06.516805/results.json b/internlm/internlm2-chat-20b/raw_2024-02-24T08-23-06.516805/results.json index f3251ec3f563c8b589ce9c30a01f7d582cacb9ac..69aaba4932236621ecfa07cd3e98f470acba98e8 100644 --- a/internlm/internlm2-chat-20b/raw_2024-02-24T08-23-06.516805/results.json +++ b/internlm/internlm2-chat-20b/raw_2024-02-24T08-23-06.516805/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.8938164607999035, - "acc,all": 0.8941993464052288, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.8005865862472271, - "mse,all": 0.40656917156862743, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.564673157162726, - "acc,exam_id__USP_2019": 0.55, - "acc,exam_id__USP_2022": 0.5510204081632653, - "acc,exam_id__USP_2023": 0.6363636363636364, - "acc,exam_id__UNICAMP_2018": 0.5925925925925926, - "acc,exam_id__UNICAMP_2019": 0.46, - 
"acc,exam_id__USP_2020": 0.5535714285714286, - "acc,exam_id__UNICAMP_2020": 0.5636363636363636, - "acc,exam_id__UNICAMP_2023": 0.6744186046511628, - "acc,exam_id__USP_2021": 0.5192307692307693, - "acc,exam_id__UNICAMP_2022": 0.5897435897435898, - "acc,exam_id__UNICAMP_2024": 0.4666666666666667, - "acc,exam_id__USP_2018": 0.5, - "acc,exam_id__UNICAMP_2021_2": 0.6078431372549019, - "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, - "acc,exam_id__USP_2024": 0.7317073170731707, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6480055983205039, - "acc,exam_id__2013": 0.5555555555555556, - "acc,exam_id__2016_2": 0.6504065040650406, - "acc,exam_id__2016": 0.6198347107438017, - "acc,exam_id__2011": 0.7094017094017094, - "acc,exam_id__2017": 0.6551724137931034, - "acc,exam_id__2023": 0.6814814814814815, - "acc,exam_id__2014": 0.6330275229357798, - "acc,exam_id__2012": 0.6896551724137931, - "acc,exam_id__2009": 0.6695652173913044, - "acc,exam_id__2015": 0.6050420168067226, - "acc,exam_id__2022": 0.6390977443609023, - "acc,exam_id__2010": 0.6581196581196581 - }, - "faquad_nli": { - "f1_macro,all": 0.5389191011031254, - "acc,all": 0.8553846153846154, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.6808266922301873, - "acc,all": 0.7071428571428572 - }, - "oab_exams": { - "acc,all": 0.4369020501138952, - "acc,exam_id__2016-20a": 0.375, - "acc,exam_id__2012-06": 0.4, - "acc,exam_id__2015-18": 0.475, - "acc,exam_id__2014-14": 0.4375, - "acc,exam_id__2012-07": 0.425, - "acc,exam_id__2015-16": 0.4125, - "acc,exam_id__2011-05": 0.425, - "acc,exam_id__2012-06a": 0.5, - "acc,exam_id__2017-23": 0.4125, - "acc,exam_id__2016-19": 0.4358974358974359, - "acc,exam_id__2017-24": 0.525, - "acc,exam_id__2016-20": 0.4625, - "acc,exam_id__2017-22": 0.5, - "acc,exam_id__2013-12": 0.425, - "acc,exam_id__2010-02": 0.47, - "acc,exam_id__2011-03": 0.3838383838383838, - "acc,exam_id__2012-08": 0.375, - "acc,exam_id__2013-10": 0.4625, - "acc,exam_id__2016-21": 0.3625, - "acc,exam_id__2014-15": 0.5256410256410257, - "acc,exam_id__2018-25": 0.45, - "acc,exam_id__2014-13": 0.45, - "acc,exam_id__2010-01": 0.38823529411764707, - "acc,exam_id__2015-17": 0.5384615384615384, - "acc,exam_id__2013-11": 0.4, - "acc,exam_id__2011-04": 0.3875, - "acc,exam_id__2012-09": 0.4025974025974026, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6917627356286026, - "acc,all": 0.781433607520564 - }, - "tweetsentbr": { - "f1_macro,all": 0.5573254035000713, - "acc,all": 0.5796019900497512, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.8938164607999035, + "acc,all": 0.8941993464052288, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.8005865862472271, + "mse,all": 0.40656917156862743, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.564673157162726, + "acc,exam_id__USP_2019": 0.55, + "acc,exam_id__USP_2022": 0.5510204081632653, + "acc,exam_id__USP_2023": 0.6363636363636364, + "acc,exam_id__UNICAMP_2018": 0.5925925925925926, + "acc,exam_id__UNICAMP_2019": 0.46, + "acc,exam_id__USP_2020": 0.5535714285714286, + "acc,exam_id__UNICAMP_2020": 0.5636363636363636, + "acc,exam_id__UNICAMP_2023": 0.6744186046511628, + "acc,exam_id__USP_2021": 0.5192307692307693, + "acc,exam_id__UNICAMP_2022": 0.5897435897435898, + "acc,exam_id__UNICAMP_2024": 0.4666666666666667, + "acc,exam_id__USP_2018": 0.5, + "acc,exam_id__UNICAMP_2021_2": 0.6078431372549019, + "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, + "acc,exam_id__USP_2024": 0.7317073170731707, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6480055983205039, + "acc,exam_id__2013": 0.5555555555555556, + "acc,exam_id__2016_2": 0.6504065040650406, + "acc,exam_id__2016": 0.6198347107438017, + "acc,exam_id__2011": 0.7094017094017094, + "acc,exam_id__2017": 0.6551724137931034, + "acc,exam_id__2023": 0.6814814814814815, + "acc,exam_id__2014": 0.6330275229357798, + "acc,exam_id__2012": 0.6896551724137931, + "acc,exam_id__2009": 0.6695652173913044, + "acc,exam_id__2015": 0.6050420168067226, + "acc,exam_id__2022": 0.6390977443609023, + "acc,exam_id__2010": 0.6581196581196581 + }, + "faquad_nli": { + "f1_macro,all": 0.808378651654688, + "acc,all": 0.8553846153846154, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.6808266922301873, + "acc,all": 0.7071428571428572 + }, + "oab_exams": { + "acc,all": 0.4369020501138952, + "acc,exam_id__2016-20a": 0.375, + "acc,exam_id__2012-06": 0.4, + "acc,exam_id__2015-18": 0.475, + "acc,exam_id__2014-14": 0.4375, + "acc,exam_id__2012-07": 0.425, + "acc,exam_id__2015-16": 0.4125, + "acc,exam_id__2011-05": 0.425, + "acc,exam_id__2012-06a": 0.5, + "acc,exam_id__2017-23": 0.4125, + "acc,exam_id__2016-19": 0.4358974358974359, + "acc,exam_id__2017-24": 0.525, + "acc,exam_id__2016-20": 0.4625, + "acc,exam_id__2017-22": 0.5, + "acc,exam_id__2013-12": 0.425, + "acc,exam_id__2010-02": 0.47, + "acc,exam_id__2011-03": 0.3838383838383838, + "acc,exam_id__2012-08": 0.375, + "acc,exam_id__2013-10": 0.4625, + "acc,exam_id__2016-21": 0.3625, + "acc,exam_id__2014-15": 0.5256410256410257, + "acc,exam_id__2018-25": 0.45, + "acc,exam_id__2014-13": 0.45, + 
"acc,exam_id__2010-01": 0.38823529411764707, + "acc,exam_id__2015-17": 0.5384615384615384, + "acc,exam_id__2013-11": 0.4, + "acc,exam_id__2011-04": 0.3875, + "acc,exam_id__2012-09": 0.4025974025974026, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6917627356286026, + "acc,all": 0.781433607520564 + }, + "tweetsentbr": { + "f1_macro,all": 0.5573254035000713, + "acc,all": 0.5796019900497512, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"3f710f76f56f8c40dc5dd800dbe66f3341cb2c87", - "model_dtype": "torch.float16", - "model_memory_footprint": 40527618048, - "model_num_parameters": 19861149696, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 2, - "max_length": 4096, - "max_ctx_length": 4064, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1536.9162581699347, - "min_seq_length": 1513, - "max_seq_length": 1614, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1798.9162581699347, - "min_seq_length": 1775, - "max_seq_length": 1876, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1724.4075104311544, - "min_seq_length": 1349, - "max_seq_length": 2502, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1681.2841147655704, - "min_seq_length": 1411, - "max_seq_length": 2606, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1753.9215384615384, - "min_seq_length": 1700, - "max_seq_length": 1892, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1479.1128571428571, - "min_seq_length": 1454, - "max_seq_length": 1739, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "3f710f76f56f8c40dc5dd800dbe66f3341cb2c87", + "model_dtype": "torch.float16", + "model_memory_footprint": 40527618048, + "model_num_parameters": 19861149696, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": 
null, + "model_device": "cuda:0", + "batch_size": 2, + "max_length": 4096, + "max_ctx_length": 4064, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1463.90569476082, - "min_seq_length": 1182, - "max_seq_length": 1977, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1536.9162581699347, + "min_seq_length": 1513, + "max_seq_length": 1614, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1798.9162581699347, + "min_seq_length": 1775, + "max_seq_length": 1876, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1724.4075104311544, + "min_seq_length": 1349, + "max_seq_length": 2502, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1681.2841147655704, + "min_seq_length": 1411, + "max_seq_length": 2606, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1753.9215384615384, + "min_seq_length": 1700, + "max_seq_length": 1892, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1479.1128571428571, + "min_seq_length": 1454, + "max_seq_length": 1739, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1463.90569476082, + "min_seq_length": 1182, + "max_seq_length": 1977, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1938.6909518213865, + "min_seq_length": 1904, + "max_seq_length": 1973, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 
2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1710.6412935323383, + "min_seq_length": 1689, + "max_seq_length": 1823, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1938.6909518213865, - "min_seq_length": 1904, - "max_seq_length": 1973, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=internlm/internlm2-chat-20b,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1710.6412935323383, - "min_seq_length": 1689, - "max_seq_length": 1823, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=internlm/internlm2-chat-20b,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "804df15" + "git_hash": "804df15" } \ No newline at end of file diff --git a/internlm/internlm2-chat-20b/raw_2024-04-21T14-44-01.464293/results.json b/internlm/internlm2-chat-20b/raw_2024-04-21T14-44-01.464293/results.json index 65d26b04420dd027d536e9bdfe4b26d62375d0b4..161b2ed9653c1fdd3bb0c9fd2b2a0a6cd42a7141 100644 --- a/internlm/internlm2-chat-20b/raw_2024-04-21T14-44-01.464293/results.json +++ b/internlm/internlm2-chat-20b/raw_2024-04-21T14-44-01.464293/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.8938164607999035, - "acc,all": 0.8941993464052288, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.8005865862472271, - "mse,all": 0.40656917156862743, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.564673157162726, - "acc,exam_id__UNICAMP_2022": 0.5897435897435898, - "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, - "acc,exam_id__USP_2019": 0.55, - "acc,exam_id__UNICAMP_2020": 0.5636363636363636, - "acc,exam_id__USP_2018": 0.5, - "acc,exam_id__UNICAMP_2021_2": 0.6078431372549019, - "acc,exam_id__UNICAMP_2023": 0.6744186046511628, - "acc,exam_id__USP_2020": 0.5535714285714286, - "acc,exam_id__UNICAMP_2024": 0.4666666666666667, - "acc,exam_id__USP_2021": 0.5192307692307693, - "acc,exam_id__USP_2023": 0.6363636363636364, - "acc,exam_id__UNICAMP_2018": 0.5925925925925926, - "acc,exam_id__USP_2022": 0.5510204081632653, - "acc,exam_id__USP_2024": 0.7317073170731707, - "acc,exam_id__UNICAMP_2019": 0.46, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6480055983205039, - "acc,exam_id__2016": 0.6198347107438017, - "acc,exam_id__2022": 
0.6390977443609023, - "acc,exam_id__2023": 0.6814814814814815, - "acc,exam_id__2009": 0.6695652173913044, - "acc,exam_id__2012": 0.6896551724137931, - "acc,exam_id__2016_2": 0.6504065040650406, - "acc,exam_id__2010": 0.6581196581196581, - "acc,exam_id__2013": 0.5555555555555556, - "acc,exam_id__2011": 0.7094017094017094, - "acc,exam_id__2014": 0.6330275229357798, - "acc,exam_id__2017": 0.6551724137931034, - "acc,exam_id__2015": 0.6050420168067226 - }, - "faquad_nli": { - "f1_macro,all": 0.5389191011031254, - "acc,all": 0.8553846153846154, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.6808266922301873, - "acc,all": 0.7071428571428572 - }, - "oab_exams": { - "acc,all": 0.4369020501138952, - "acc,exam_id__2015-16": 0.4125, - "acc,exam_id__2015-18": 0.475, - "acc,exam_id__2016-20": 0.4625, - "acc,exam_id__2015-17": 0.5384615384615384, - "acc,exam_id__2013-10": 0.4625, - "acc,exam_id__2011-03": 0.3838383838383838, - "acc,exam_id__2016-21": 0.3625, - "acc,exam_id__2012-06": 0.4, - "acc,exam_id__2011-04": 0.3875, - "acc,exam_id__2013-11": 0.4, - "acc,exam_id__2016-19": 0.4358974358974359, - "acc,exam_id__2017-24": 0.525, - "acc,exam_id__2017-23": 0.4125, - "acc,exam_id__2017-22": 0.5, - "acc,exam_id__2012-06a": 0.5, - "acc,exam_id__2010-02": 0.47, - "acc,exam_id__2013-12": 0.425, - "acc,exam_id__2014-15": 0.5256410256410257, - "acc,exam_id__2012-08": 0.375, - "acc,exam_id__2012-09": 0.4025974025974026, - "acc,exam_id__2016-20a": 0.375, - "acc,exam_id__2014-13": 0.45, - "acc,exam_id__2018-25": 0.45, - "acc,exam_id__2014-14": 0.4375, - "acc,exam_id__2010-01": 0.38823529411764707, - "acc,exam_id__2011-05": 0.425, - "acc,exam_id__2012-07": 0.425, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6917627356286026, - "acc,all": 0.781433607520564 - }, - "tweetsentbr": { - "f1_macro,all": 0.5573254035000713, - "acc,all": 0.5796019900497512, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.8938164607999035, + "acc,all": 0.8941993464052288, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.8005865862472271, + "mse,all": 0.40656917156862743, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.564673157162726, + "acc,exam_id__UNICAMP_2022": 0.5897435897435898, + "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, + "acc,exam_id__USP_2019": 0.55, + "acc,exam_id__UNICAMP_2020": 0.5636363636363636, + "acc,exam_id__USP_2018": 0.5, + "acc,exam_id__UNICAMP_2021_2": 0.6078431372549019, + "acc,exam_id__UNICAMP_2023": 0.6744186046511628, + "acc,exam_id__USP_2020": 0.5535714285714286, + "acc,exam_id__UNICAMP_2024": 0.4666666666666667, + "acc,exam_id__USP_2021": 0.5192307692307693, + "acc,exam_id__USP_2023": 0.6363636363636364, + "acc,exam_id__UNICAMP_2018": 0.5925925925925926, + "acc,exam_id__USP_2022": 0.5510204081632653, + "acc,exam_id__USP_2024": 0.7317073170731707, + "acc,exam_id__UNICAMP_2019": 0.46, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6480055983205039, + "acc,exam_id__2016": 0.6198347107438017, + "acc,exam_id__2022": 0.6390977443609023, + "acc,exam_id__2023": 0.6814814814814815, + "acc,exam_id__2009": 0.6695652173913044, + "acc,exam_id__2012": 0.6896551724137931, + "acc,exam_id__2016_2": 0.6504065040650406, + "acc,exam_id__2010": 0.6581196581196581, + "acc,exam_id__2013": 0.5555555555555556, + "acc,exam_id__2011": 0.7094017094017094, + "acc,exam_id__2014": 0.6330275229357798, + "acc,exam_id__2017": 0.6551724137931034, + "acc,exam_id__2015": 0.6050420168067226 + }, + "faquad_nli": { + "f1_macro,all": 0.808378651654688, + "acc,all": 0.8553846153846154, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.6808266922301873, + "acc,all": 0.7071428571428572 + }, + "oab_exams": { + "acc,all": 0.4369020501138952, + "acc,exam_id__2015-16": 0.4125, + "acc,exam_id__2015-18": 0.475, + "acc,exam_id__2016-20": 0.4625, + "acc,exam_id__2015-17": 0.5384615384615384, + "acc,exam_id__2013-10": 0.4625, + "acc,exam_id__2011-03": 0.3838383838383838, + "acc,exam_id__2016-21": 0.3625, + "acc,exam_id__2012-06": 0.4, + "acc,exam_id__2011-04": 0.3875, + "acc,exam_id__2013-11": 0.4, + "acc,exam_id__2016-19": 0.4358974358974359, + "acc,exam_id__2017-24": 0.525, + "acc,exam_id__2017-23": 0.4125, + "acc,exam_id__2017-22": 0.5, + "acc,exam_id__2012-06a": 0.5, + "acc,exam_id__2010-02": 0.47, + "acc,exam_id__2013-12": 0.425, + "acc,exam_id__2014-15": 0.5256410256410257, + "acc,exam_id__2012-08": 0.375, + "acc,exam_id__2012-09": 0.4025974025974026, + "acc,exam_id__2016-20a": 0.375, + "acc,exam_id__2014-13": 
0.45, + "acc,exam_id__2018-25": 0.45, + "acc,exam_id__2014-14": 0.4375, + "acc,exam_id__2010-01": 0.38823529411764707, + "acc,exam_id__2011-05": 0.425, + "acc,exam_id__2012-07": 0.425, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6917627356286026, + "acc,all": 0.781433607520564 + }, + "tweetsentbr": { + "f1_macro,all": 0.5573254035000713, + "acc,all": 0.5796019900497512, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "19833d4a5f461f2a4c4b86b0c4ebd24b5ca8869c", - "model_dtype": "torch.float16", - "model_memory_footprint": 40527618048, - "model_num_parameters": 19861149696, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 4, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1536.9162581699347, - "min_seq_length": 1513, - "max_seq_length": 1614, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1798.9162581699347, - "min_seq_length": 1775, - "max_seq_length": 1876, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1724.4075104311544, - "min_seq_length": 1349, - "max_seq_length": 2502, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 
1681.2841147655704, - "min_seq_length": 1411, - "max_seq_length": 2606, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1753.9215384615384, - "min_seq_length": 1700, - "max_seq_length": 1892, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "19833d4a5f461f2a4c4b86b0c4ebd24b5ca8869c", + "model_dtype": "torch.float16", + "model_memory_footprint": 40527618048, + "model_num_parameters": 19861149696, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 4, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1479.1128571428571, - "min_seq_length": 1454, - "max_seq_length": 1739, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1463.90569476082, - "min_seq_length": 1182, - "max_seq_length": 1977, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1536.9162581699347, + "min_seq_length": 1513, + "max_seq_length": 1614, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1798.9162581699347, + "min_seq_length": 1775, + "max_seq_length": 1876, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1724.4075104311544, + "min_seq_length": 1349, + "max_seq_length": 2502, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1681.2841147655704, + "min_seq_length": 1411, + "max_seq_length": 2606, + "max_ctx_length": 2528, 
+ "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1753.9215384615384, + "min_seq_length": 1700, + "max_seq_length": 1892, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1479.1128571428571, + "min_seq_length": 1454, + "max_seq_length": 1739, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1463.90569476082, + "min_seq_length": 1182, + "max_seq_length": 1977, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1938.6909518213865, + "min_seq_length": 1904, + "max_seq_length": 1973, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1710.6412935323383, + "min_seq_length": 1689, + "max_seq_length": 1823, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1938.6909518213865, - "min_seq_length": 1904, - "max_seq_length": 1973, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=internlm/internlm2-chat-20b,dtype=float16,device=cuda:0,revision=v1.0.0,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1710.6412935323383, - "min_seq_length": 1689, - "max_seq_length": 1823, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=internlm/internlm2-chat-20b,dtype=float16,device=cuda:0,revision=v1.0.0,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - 
}, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/internlm/internlm2-chat-20b/results_2024-02-24T08-23-06.516805.json b/internlm/internlm2-chat-20b/results_2024-02-24T08-23-06.516805.json index 8e310cd7a1b727716d7a44ca72b6670a6f0d29b4..f6894d365691a43448f42b1df1b7847aa6fb4a06 100644 --- a/internlm/internlm2-chat-20b/results_2024-02-24T08-23-06.516805.json +++ b/internlm/internlm2-chat-20b/results_2024-02-24T08-23-06.516805.json @@ -34,15 +34,15 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6458686427895824, - "all_grouped_npm": 0.4554919111495502, + "all_grouped_average": 0.6758085928508673, + "all_grouped_npm": 0.5105285840563236, "all_grouped": { "enem_challenge": 0.6480055983205039, "bluex": 0.564673157162726, "oab_exams": 0.4369020501138952, "assin2_rte": 0.8938164607999035, "assin2_sts": 0.8005865862472271, - "faquad_nli": 0.5389191011031254, + "faquad_nli": 0.808378651654688, "hatebr_offensive": 0.6808266922301873, "portuguese_hate_speech": 0.6917627356286026, "tweetsentbr": 0.5573254035000713 @@ -53,7 +53,7 @@ "harness|oab_exams|oab_exams|None|3": 0.4369020501138952, "harness|assin2_rte|assin2_rte|None|15": 0.8938164607999035, "harness|assin2_sts|assin2_sts|None|15": 0.8005865862472271, - "harness|faquad_nli|faquad_nli|None|15": 0.5389191011031254, + "harness|faquad_nli|faquad_nli|None|15": 0.808378651654688, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.6808266922301873, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6917627356286026, "harness|tweetsentbr|tweetsentbr|None|25": 0.5573254035000713 @@ -135,9 +135,9 @@ "main_score": 0.8005865862472271 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.5389191011031254, + "f1_macro,all": 0.808378651654688, "acc,all": 0.8553846153846154, - "main_score": 0.5389191011031254 + "main_score": 0.808378651654688 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { "f1_macro,all": 0.6808266922301873, diff --git a/internlm/internlm2-chat-20b/results_2024-04-21T14-44-01.464293.json b/internlm/internlm2-chat-20b/results_2024-04-21T14-44-01.464293.json index 8b62b4a79a519cfb8128b3a233bade15deaacaa8..f61a205a8cf63a2646365fb10fca030cb7c538d6 100644 --- a/internlm/internlm2-chat-20b/results_2024-04-21T14-44-01.464293.json +++ b/internlm/internlm2-chat-20b/results_2024-04-21T14-44-01.464293.json @@ -34,15 +34,15 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6458686427895824, - "all_grouped_npm": 0.4554919111495502, + "all_grouped_average": 0.6758085928508673, + "all_grouped_npm": 0.5105285840563236, "all_grouped": { "enem_challenge": 0.6480055983205039, "bluex": 0.564673157162726, "oab_exams": 0.4369020501138952, "assin2_rte": 0.8938164607999035, "assin2_sts": 0.8005865862472271, - "faquad_nli": 0.5389191011031254, + "faquad_nli": 0.808378651654688, "hatebr_offensive": 0.6808266922301873, "portuguese_hate_speech": 0.6917627356286026, "tweetsentbr": 0.5573254035000713 @@ -53,7 +53,7 @@ "harness|oab_exams|oab_exams|None|3": 0.4369020501138952, "harness|assin2_rte|assin2_rte|None|15": 0.8938164607999035, "harness|assin2_sts|assin2_sts|None|15": 0.8005865862472271, - "harness|faquad_nli|faquad_nli|None|15": 0.5389191011031254, + "harness|faquad_nli|faquad_nli|None|15": 0.808378651654688, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.6808266922301873, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6917627356286026, "harness|tweetsentbr|tweetsentbr|None|25": 0.5573254035000713 @@ -135,9 
+135,9 @@ "main_score": 0.8005865862472271 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.5389191011031254, + "f1_macro,all": 0.808378651654688, "acc,all": 0.8553846153846154, - "main_score": 0.5389191011031254 + "main_score": 0.808378651654688 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { "f1_macro,all": 0.6808266922301873, diff --git a/internlm/internlm2_5-1_8b/raw_2024-08-11T08-17-24.270891/results.json b/internlm/internlm2_5-1_8b/raw_2024-08-11T08-17-24.270891/results.json index 3a5c5e5970aa9a004f6346928fd8ebda00a9391d..616d42d07d6aa28e1d842435178ff4ae82872d11 100644 --- a/internlm/internlm2_5-1_8b/raw_2024-08-11T08-17-24.270891/results.json +++ b/internlm/internlm2_5-1_8b/raw_2024-08-11T08-17-24.270891/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.4014150608539269, - "acc,all": 0.5314542483660131, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.35026225315542026, - "mse,all": 1.7070465686274512, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.3226703755215577, - "acc,exam_id__UNICAMP_2018": 0.42592592592592593, - "acc,exam_id__USP_2022": 0.2857142857142857, - "acc,exam_id__UNICAMP_2019": 0.26, - "acc,exam_id__UNICAMP_2020": 0.2909090909090909, - "acc,exam_id__USP_2024": 0.21951219512195122, - "acc,exam_id__UNICAMP_2024": 0.37777777777777777, - "acc,exam_id__USP_2021": 0.2692307692307692, - "acc,exam_id__UNICAMP_2021_2": 0.37254901960784315, - "acc,exam_id__UNICAMP_2021_1": 0.3695652173913043, - "acc,exam_id__UNICAMP_2022": 0.28205128205128205, - "acc,exam_id__USP_2023": 0.3181818181818182, - "acc,exam_id__UNICAMP_2023": 0.5581395348837209, - "acc,exam_id__USP_2018": 0.24074074074074073, - "acc,exam_id__USP_2020": 0.3392857142857143, - "acc,exam_id__USP_2019": 0.225, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.3282015395381386, - "acc,exam_id__2011": 0.36752136752136755, - "acc,exam_id__2010": 0.3504273504273504, - "acc,exam_id__2009": 0.2782608695652174, - "acc,exam_id__2015": 0.23529411764705882, - "acc,exam_id__2017": 0.3620689655172414, - "acc,exam_id__2014": 0.24770642201834864, - "acc,exam_id__2022": 0.39849624060150374, - "acc,exam_id__2012": 0.33620689655172414, - "acc,exam_id__2023": 0.37037037037037035, - "acc,exam_id__2016_2": 0.3252032520325203, - "acc,exam_id__2016": 0.2975206611570248, - "acc,exam_id__2013": 0.35185185185185186 - }, - "faquad_nli": { - "f1_macro,all": 0.4386873920552677, - "acc,all": 0.7815384615384615, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.3594117505398441, - "acc,all": 0.5114285714285715 - }, - "oab_exams": { - "acc,all": 0.3148063781321184, - "acc,exam_id__2017-24": 0.3375, - "acc,exam_id__2013-12": 0.2875, - "acc,exam_id__2015-18": 0.2625, - "acc,exam_id__2018-25": 0.3125, - "acc,exam_id__2016-19": 0.34615384615384615, - "acc,exam_id__2010-01": 0.24705882352941178, - "acc,exam_id__2016-20": 0.325, - "acc,exam_id__2016-21": 0.325, - "acc,exam_id__2011-04": 0.375, - "acc,exam_id__2014-15": 0.3333333333333333, - "acc,exam_id__2010-02": 0.34, - "acc,exam_id__2013-10": 0.2375, - "acc,exam_id__2012-07": 0.3375, - "acc,exam_id__2015-17": 0.358974358974359, - "acc,exam_id__2013-11": 0.375, - "acc,exam_id__2012-09": 0.2597402597402597, - "acc,exam_id__2015-16": 0.25, - "acc,exam_id__2012-08": 0.3375, - "acc,exam_id__2011-03": 0.35353535353535354, - "acc,exam_id__2016-20a": 0.2625, - "acc,exam_id__2014-13": 0.275, - "acc,exam_id__2017-23": 0.3375, - 
"acc,exam_id__2012-06": 0.3375, - "acc,exam_id__2011-05": 0.2625, - "acc,exam_id__2014-14": 0.3375, - "acc,exam_id__2012-06a": 0.3125, - "acc,exam_id__2017-22": 0.3625, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.2941074437506195, - "acc,all": 0.6874265569917744 - }, - "tweetsentbr": { - "f1_macro,all": 0.4337044491104478, - "acc,all": 0.5741293532338309, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.4014150608539269, + "acc,all": 0.5314542483660131, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.35026225315542026, + "mse,all": 1.7070465686274512, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.3226703755215577, + "acc,exam_id__UNICAMP_2018": 0.42592592592592593, + "acc,exam_id__USP_2022": 0.2857142857142857, + "acc,exam_id__UNICAMP_2019": 0.26, + "acc,exam_id__UNICAMP_2020": 0.2909090909090909, + "acc,exam_id__USP_2024": 0.21951219512195122, + "acc,exam_id__UNICAMP_2024": 0.37777777777777777, + "acc,exam_id__USP_2021": 0.2692307692307692, + "acc,exam_id__UNICAMP_2021_2": 0.37254901960784315, + "acc,exam_id__UNICAMP_2021_1": 0.3695652173913043, + "acc,exam_id__UNICAMP_2022": 0.28205128205128205, + "acc,exam_id__USP_2023": 0.3181818181818182, + "acc,exam_id__UNICAMP_2023": 0.5581395348837209, + "acc,exam_id__USP_2018": 0.24074074074074073, + "acc,exam_id__USP_2020": 0.3392857142857143, + "acc,exam_id__USP_2019": 0.225, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.3282015395381386, + "acc,exam_id__2011": 0.36752136752136755, + "acc,exam_id__2010": 0.3504273504273504, + "acc,exam_id__2009": 0.2782608695652174, + "acc,exam_id__2015": 0.23529411764705882, + "acc,exam_id__2017": 0.3620689655172414, + "acc,exam_id__2014": 0.24770642201834864, + "acc,exam_id__2022": 0.39849624060150374, + "acc,exam_id__2012": 0.33620689655172414, + "acc,exam_id__2023": 0.37037037037037035, + "acc,exam_id__2016_2": 0.3252032520325203, + "acc,exam_id__2016": 0.2975206611570248, + "acc,exam_id__2013": 0.35185185185185186 + }, + "faquad_nli": { + "f1_macro,all": 0.4386873920552677, + "acc,all": 0.7815384615384615, + "alias": "faquad_nli" + }, + 
"hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.3594117505398441, + "acc,all": 0.5114285714285715 + }, + "oab_exams": { + "acc,all": 0.3148063781321184, + "acc,exam_id__2017-24": 0.3375, + "acc,exam_id__2013-12": 0.2875, + "acc,exam_id__2015-18": 0.2625, + "acc,exam_id__2018-25": 0.3125, + "acc,exam_id__2016-19": 0.34615384615384615, + "acc,exam_id__2010-01": 0.24705882352941178, + "acc,exam_id__2016-20": 0.325, + "acc,exam_id__2016-21": 0.325, + "acc,exam_id__2011-04": 0.375, + "acc,exam_id__2014-15": 0.3333333333333333, + "acc,exam_id__2010-02": 0.34, + "acc,exam_id__2013-10": 0.2375, + "acc,exam_id__2012-07": 0.3375, + "acc,exam_id__2015-17": 0.358974358974359, + "acc,exam_id__2013-11": 0.375, + "acc,exam_id__2012-09": 0.2597402597402597, + "acc,exam_id__2015-16": 0.25, + "acc,exam_id__2012-08": 0.3375, + "acc,exam_id__2011-03": 0.35353535353535354, + "acc,exam_id__2016-20a": 0.2625, + "acc,exam_id__2014-13": 0.275, + "acc,exam_id__2017-23": 0.3375, + "acc,exam_id__2012-06": 0.3375, + "acc,exam_id__2011-05": 0.2625, + "acc,exam_id__2014-14": 0.3375, + "acc,exam_id__2012-06a": 0.3125, + "acc,exam_id__2017-22": 0.3625, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.44116116562592916, + "acc,all": 0.6874265569917744 + }, + "tweetsentbr": { + "f1_macro,all": 0.4337044491104478, + "acc,all": 0.5741293532338309, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "b0460b575ce3631a2bfee19f883bc5fb3eccafdc", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 3778226176, - "model_num_parameters": 1889110016, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1372.9162581699347, - "min_seq_length": 1349, - "max_seq_length": 1450, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1649.9162581699347, - "min_seq_length": 1626, - "max_seq_length": 1727, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1680.4075104311544, - "min_seq_length": 1305, - "max_seq_length": 2458, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 1637.2841147655704, 
- "min_seq_length": 1367, - "max_seq_length": 2562, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1589.9215384615384, - "min_seq_length": 1536, - "max_seq_length": 1728, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "b0460b575ce3631a2bfee19f883bc5fb3eccafdc", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 3778226176, + "model_num_parameters": 1889110016, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1215.1128571428571, - "min_seq_length": 1190, - "max_seq_length": 1475, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1419.90569476082, - "min_seq_length": 1138, - "max_seq_length": 1933, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1372.9162581699347, + "min_seq_length": 1349, + "max_seq_length": 1450, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1649.9162581699347, + "min_seq_length": 1626, + "max_seq_length": 1727, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1680.4075104311544, + "min_seq_length": 1305, + "max_seq_length": 2458, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1637.2841147655704, + "min_seq_length": 1367, + "max_seq_length": 2562, + "max_ctx_length": 2528, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1589.9215384615384, + "min_seq_length": 1536, + "max_seq_length": 1728, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1215.1128571428571, + "min_seq_length": 1190, + "max_seq_length": 1475, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1419.90569476082, + "min_seq_length": 1138, + "max_seq_length": 1933, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1674.6909518213865, + "min_seq_length": 1640, + "max_seq_length": 1709, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1446.6412935323383, + "min_seq_length": 1425, + "max_seq_length": 1559, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1674.6909518213865, - "min_seq_length": 1640, - "max_seq_length": 1709, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=internlm/internlm2_5-1_8b,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1446.6412935323383, - "min_seq_length": 1425, - "max_seq_length": 1559, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=internlm/internlm2_5-1_8b,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "5a13f3e" + 
"git_hash": "5a13f3e" } \ No newline at end of file diff --git a/internlm/internlm2_5-1_8b/results_2024-08-11T08-17-24.270891.json b/internlm/internlm2_5-1_8b/results_2024-08-11T08-17-24.270891.json index 99dd34d8a836d34c53cb5aeebe7b53ff37fd6c75..c80f15e588d4c82c7d447474ae022289f26dd537 100644 --- a/internlm/internlm2_5-1_8b/results_2024-08-11T08-17-24.270891.json +++ b/internlm/internlm2_5-1_8b/results_2024-08-11T08-17-24.270891.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.3603629602952601, - "all_grouped_npm": 0.0016884543994314537, + "all_grouped_average": 0.37670226272585006, + "all_grouped_npm": 0.03304987941016076, "all_grouped": { "enem_challenge": 0.3282015395381386, "bluex": 0.3226703755215577, @@ -44,7 +44,7 @@ "assin2_sts": 0.35026225315542026, "faquad_nli": 0.4386873920552677, "hatebr_offensive": 0.3594117505398441, - "portuguese_hate_speech": 0.2941074437506195, + "portuguese_hate_speech": 0.44116116562592916, "tweetsentbr": 0.4337044491104478 }, "all": { @@ -55,7 +55,7 @@ "harness|assin2_sts|assin2_sts|None|15": 0.35026225315542026, "harness|faquad_nli|faquad_nli|None|15": 0.4386873920552677, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.3594117505398441, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.2941074437506195, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.44116116562592916, "harness|tweetsentbr|tweetsentbr|None|25": 0.4337044491104478 }, "harness|enem_challenge|enem_challenge|None|3": { @@ -145,9 +145,9 @@ "main_score": 0.3594117505398441 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.2941074437506195, + "f1_macro,all": 0.44116116562592916, "acc,all": 0.6874265569917744, - "main_score": 0.2941074437506195 + "main_score": 0.44116116562592916 }, "harness|tweetsentbr|tweetsentbr|None|25": { "f1_macro,all": 0.4337044491104478, diff --git a/internlm/internlm2_5-20b-chat/raw_2024-08-11T09-06-11.616640/results.json b/internlm/internlm2_5-20b-chat/raw_2024-08-11T09-06-11.616640/results.json index 1bb689ad99a1e77739fbed893d493ea51ce097b5..970abb93536598464a149b9eb1bef6fb3dad6565 100644 --- a/internlm/internlm2_5-20b-chat/raw_2024-08-11T09-06-11.616640/results.json +++ b/internlm/internlm2_5-20b-chat/raw_2024-08-11T09-06-11.616640/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.3941550122089814, - "acc,all": 0.5849673202614379, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.8220632161489191, - "mse,all": 0.37319035947712415, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.6133518776077886, - "acc,exam_id__UNICAMP_2018": 0.6851851851851852, - "acc,exam_id__USP_2022": 0.5306122448979592, - "acc,exam_id__UNICAMP_2019": 0.62, - "acc,exam_id__UNICAMP_2020": 0.5818181818181818, - "acc,exam_id__USP_2024": 0.7073170731707317, - "acc,exam_id__UNICAMP_2024": 0.5777777777777777, - "acc,exam_id__USP_2021": 0.6153846153846154, - "acc,exam_id__UNICAMP_2021_2": 0.7058823529411765, - "acc,exam_id__UNICAMP_2021_1": 0.6521739130434783, - "acc,exam_id__UNICAMP_2022": 0.6666666666666666, - "acc,exam_id__USP_2023": 0.6136363636363636, - "acc,exam_id__UNICAMP_2023": 0.6046511627906976, - "acc,exam_id__USP_2018": 0.48148148148148145, - "acc,exam_id__USP_2020": 0.6071428571428571, - "acc,exam_id__USP_2019": 0.575, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6731980405878236, - "acc,exam_id__2011": 0.7008547008547008, - "acc,exam_id__2010": 
0.7008547008547008, - "acc,exam_id__2009": 0.7130434782608696, - "acc,exam_id__2015": 0.7142857142857143, - "acc,exam_id__2017": 0.6551724137931034, - "acc,exam_id__2014": 0.6513761467889908, - "acc,exam_id__2022": 0.6466165413533834, - "acc,exam_id__2012": 0.6724137931034483, - "acc,exam_id__2023": 0.6962962962962963, - "acc,exam_id__2016_2": 0.7073170731707317, - "acc,exam_id__2016": 0.5785123966942148, - "acc,exam_id__2013": 0.6388888888888888 - }, - "faquad_nli": { - "f1_macro,all": 0.2856301531213192, - "acc,all": 0.7461538461538462, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.31213437914013215, - "acc,all": 0.49142857142857144 - }, - "oab_exams": { - "acc,all": 0.4542141230068337, - "acc,exam_id__2017-24": 0.5, - "acc,exam_id__2013-12": 0.525, - "acc,exam_id__2015-18": 0.475, - "acc,exam_id__2018-25": 0.4375, - "acc,exam_id__2016-19": 0.44871794871794873, - "acc,exam_id__2010-01": 0.3058823529411765, - "acc,exam_id__2016-20": 0.4375, - "acc,exam_id__2016-21": 0.4375, - "acc,exam_id__2011-04": 0.4625, - "acc,exam_id__2014-15": 0.48717948717948717, - "acc,exam_id__2010-02": 0.52, - "acc,exam_id__2013-10": 0.45, - "acc,exam_id__2012-07": 0.4875, - "acc,exam_id__2015-17": 0.5, - "acc,exam_id__2013-11": 0.45, - "acc,exam_id__2012-09": 0.4805194805194805, - "acc,exam_id__2015-16": 0.5, - "acc,exam_id__2012-08": 0.4375, - "acc,exam_id__2011-03": 0.41414141414141414, - "acc,exam_id__2016-20a": 0.3625, - "acc,exam_id__2014-13": 0.4, - "acc,exam_id__2017-23": 0.35, - "acc,exam_id__2012-06": 0.4375, - "acc,exam_id__2011-05": 0.4625, - "acc,exam_id__2014-14": 0.525, - "acc,exam_id__2012-06a": 0.4375, - "acc,exam_id__2017-22": 0.5375, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.28469795362745753, - "acc,all": 0.5605170387779084 - }, - "tweetsentbr": { - "f1_macro,all": 0.6465655982053967, - "acc,all": 0.6900497512437811, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.5912325183134722, + "acc,all": 0.5849673202614379, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.8220632161489191, + "mse,all": 0.37319035947712415, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.6133518776077886, + "acc,exam_id__UNICAMP_2018": 0.6851851851851852, + "acc,exam_id__USP_2022": 0.5306122448979592, + "acc,exam_id__UNICAMP_2019": 0.62, + "acc,exam_id__UNICAMP_2020": 0.5818181818181818, + "acc,exam_id__USP_2024": 0.7073170731707317, + "acc,exam_id__UNICAMP_2024": 0.5777777777777777, + "acc,exam_id__USP_2021": 0.6153846153846154, + "acc,exam_id__UNICAMP_2021_2": 0.7058823529411765, + "acc,exam_id__UNICAMP_2021_1": 0.6521739130434783, + "acc,exam_id__UNICAMP_2022": 0.6666666666666666, + "acc,exam_id__USP_2023": 0.6136363636363636, + "acc,exam_id__UNICAMP_2023": 0.6046511627906976, + "acc,exam_id__USP_2018": 0.48148148148148145, + "acc,exam_id__USP_2020": 0.6071428571428571, + "acc,exam_id__USP_2019": 0.575, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6731980405878236, + "acc,exam_id__2011": 0.7008547008547008, + "acc,exam_id__2010": 0.7008547008547008, + "acc,exam_id__2009": 0.7130434782608696, + "acc,exam_id__2015": 0.7142857142857143, + "acc,exam_id__2017": 0.6551724137931034, + "acc,exam_id__2014": 0.6513761467889908, + "acc,exam_id__2022": 0.6466165413533834, + "acc,exam_id__2012": 0.6724137931034483, + "acc,exam_id__2023": 0.6962962962962963, + "acc,exam_id__2016_2": 0.7073170731707317, + "acc,exam_id__2016": 0.5785123966942148, + "acc,exam_id__2013": 0.6388888888888888 + }, + "faquad_nli": { + "f1_macro,all": 0.4284452296819788, + "acc,all": 0.7461538461538462, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.46820156871019825, + "acc,all": 0.49142857142857144 + }, + "oab_exams": { + "acc,all": 0.4542141230068337, + "acc,exam_id__2017-24": 0.5, + "acc,exam_id__2013-12": 0.525, + "acc,exam_id__2015-18": 0.475, + "acc,exam_id__2018-25": 0.4375, + "acc,exam_id__2016-19": 0.44871794871794873, + "acc,exam_id__2010-01": 0.3058823529411765, + "acc,exam_id__2016-20": 0.4375, + "acc,exam_id__2016-21": 0.4375, + "acc,exam_id__2011-04": 0.4625, + "acc,exam_id__2014-15": 0.48717948717948717, + "acc,exam_id__2010-02": 0.52, + "acc,exam_id__2013-10": 0.45, + "acc,exam_id__2012-07": 0.4875, + "acc,exam_id__2015-17": 0.5, + "acc,exam_id__2013-11": 0.45, + "acc,exam_id__2012-09": 0.4805194805194805, + "acc,exam_id__2015-16": 0.5, + "acc,exam_id__2012-08": 0.4375, + "acc,exam_id__2011-03": 0.41414141414141414, + "acc,exam_id__2016-20a": 0.3625, + "acc,exam_id__2014-13": 0.4, + 
"acc,exam_id__2017-23": 0.35, + "acc,exam_id__2012-06": 0.4375, + "acc,exam_id__2011-05": 0.4625, + "acc,exam_id__2014-14": 0.525, + "acc,exam_id__2012-06a": 0.4375, + "acc,exam_id__2017-22": 0.5375, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.4270469304411863, + "acc,all": 0.5605170387779084 + }, + "tweetsentbr": { + "f1_macro,all": 0.6465655982053967, + "acc,all": 0.6900497512437811, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "ef17bde929761255fee76d95e2c25969ccd93b0d", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 39722311680, - "model_num_parameters": 19861149696, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 4, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1536.9162581699347, - "min_seq_length": 1513, - "max_seq_length": 1614, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1798.9162581699347, - "min_seq_length": 1775, - "max_seq_length": 1876, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1724.4075104311544, - "min_seq_length": 1349, - "max_seq_length": 2502, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 
1681.2841147655704, - "min_seq_length": 1411, - "max_seq_length": 2606, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1753.9215384615384, - "min_seq_length": 1700, - "max_seq_length": 1892, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "ef17bde929761255fee76d95e2c25969ccd93b0d", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 39722311680, + "model_num_parameters": 19861149696, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 4, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1479.1128571428571, - "min_seq_length": 1454, - "max_seq_length": 1739, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1463.90569476082, - "min_seq_length": 1182, - "max_seq_length": 1977, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1536.9162581699347, + "min_seq_length": 1513, + "max_seq_length": 1614, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1798.9162581699347, + "min_seq_length": 1775, + "max_seq_length": 1876, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1724.4075104311544, + "min_seq_length": 1349, + "max_seq_length": 2502, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1681.2841147655704, + "min_seq_length": 1411, + "max_seq_length": 2606, + "max_ctx_length": 
2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1753.9215384615384, + "min_seq_length": 1700, + "max_seq_length": 1892, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1479.1128571428571, + "min_seq_length": 1454, + "max_seq_length": 1739, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1463.90569476082, + "min_seq_length": 1182, + "max_seq_length": 1977, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1938.6909518213865, + "min_seq_length": 1904, + "max_seq_length": 1973, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1710.6412935323383, + "min_seq_length": 1689, + "max_seq_length": 1823, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1938.6909518213865, - "min_seq_length": 1904, - "max_seq_length": 1973, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=internlm/internlm2_5-20b-chat,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1710.6412935323383, - "min_seq_length": 1689, - "max_seq_length": 1823, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=internlm/internlm2_5-20b-chat,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": 
null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/internlm/internlm2_5-20b-chat/results_2024-08-11T09-06-11.616640.json b/internlm/internlm2_5-20b-chat/results_2024-08-11T09-06-11.616640.json index 5db91e03e1c2bbdfeedd0aea8636da9732386035..9bd631c7e6be15d27c8e141e1932be73e1a61d4e 100644 --- a/internlm/internlm2_5-20b-chat/results_2024-08-11T09-06-11.616640.json +++ b/internlm/internlm2_5-20b-chat/results_2024-08-11T09-06-11.616640.json @@ -34,28 +34,28 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.49844559485051687, - "all_grouped_npm": 0.15416219332390457, + "all_grouped_average": 0.5693687891892886, + "all_grouped_npm": 0.2921666055310192, "all_grouped": { "enem_challenge": 0.6731980405878236, "bluex": 0.6133518776077886, "oab_exams": 0.4542141230068337, - "assin2_rte": 0.3941550122089814, + "assin2_rte": 0.5912325183134722, "assin2_sts": 0.8220632161489191, - "faquad_nli": 0.2856301531213192, - "hatebr_offensive": 0.31213437914013215, - "portuguese_hate_speech": 0.28469795362745753, + "faquad_nli": 0.4284452296819788, + "hatebr_offensive": 0.46820156871019825, + "portuguese_hate_speech": 0.4270469304411863, "tweetsentbr": 0.6465655982053967 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6731980405878236, "harness|bluex|bluex|None|3": 0.6133518776077886, "harness|oab_exams|oab_exams|None|3": 0.4542141230068337, - "harness|assin2_rte|assin2_rte|None|15": 0.3941550122089814, + "harness|assin2_rte|assin2_rte|None|15": 0.5912325183134722, "harness|assin2_sts|assin2_sts|None|15": 0.8220632161489191, - "harness|faquad_nli|faquad_nli|None|15": 0.2856301531213192, - "harness|hatebr_offensive|hatebr_offensive|None|25": 0.31213437914013215, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.28469795362745753, + "harness|faquad_nli|faquad_nli|None|15": 0.4284452296819788, + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.46820156871019825, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.4270469304411863, "harness|tweetsentbr|tweetsentbr|None|25": 0.6465655982053967 }, "harness|enem_challenge|enem_challenge|None|3": { @@ -125,9 +125,9 @@ "main_score": 0.4542141230068337 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.3941550122089814, + "f1_macro,all": 0.5912325183134722, "acc,all": 0.5849673202614379, - "main_score": 0.3941550122089814 + "main_score": 0.5912325183134722 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.8220632161489191, @@ -135,19 +135,19 @@ "main_score": 0.8220632161489191 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.2856301531213192, + "f1_macro,all": 0.4284452296819788, "acc,all": 0.7461538461538462, - "main_score": 0.2856301531213192 + "main_score": 0.4284452296819788 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.31213437914013215, + "f1_macro,all": 0.46820156871019825, "acc,all": 0.49142857142857144, - "main_score": 0.31213437914013215 + "main_score": 0.46820156871019825 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.28469795362745753, + "f1_macro,all": 0.4270469304411863, "acc,all": 0.5605170387779084, - "main_score": 0.28469795362745753 + "main_score": 0.4270469304411863 }, "harness|tweetsentbr|tweetsentbr|None|25": { "f1_macro,all": 0.6465655982053967, diff --git a/invalid-coder/Sakura-SOLAR-Instruct-CarbonVillain-en-10.7B-v2-slerp/raw_2024-05-19T12-51-48.449854/results.json 
b/invalid-coder/Sakura-SOLAR-Instruct-CarbonVillain-en-10.7B-v2-slerp/raw_2024-05-19T12-51-48.449854/results.json index 5449fd409025db9fbc7b3c660965bad1e35a8b61..4618dabec9a837103f70ecaf0ef656e144ac7c74 100644 --- a/invalid-coder/Sakura-SOLAR-Instruct-CarbonVillain-en-10.7B-v2-slerp/raw_2024-05-19T12-51-48.449854/results.json +++ b/invalid-coder/Sakura-SOLAR-Instruct-CarbonVillain-en-10.7B-v2-slerp/raw_2024-05-19T12-51-48.449854/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9293286384185251, - "acc,all": 0.9293300653594772, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.8165192082173185, - "mse,all": 0.498736368872549, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5841446453407511, - "acc,exam_id__UNICAMP_2021_1": 0.6086956521739131, - "acc,exam_id__USP_2024": 0.8048780487804879, - "acc,exam_id__UNICAMP_2019": 0.6, - "acc,exam_id__UNICAMP_2023": 0.6976744186046512, - "acc,exam_id__USP_2018": 0.5, - "acc,exam_id__USP_2023": 0.6590909090909091, - "acc,exam_id__UNICAMP_2018": 0.5185185185185185, - "acc,exam_id__USP_2021": 0.5576923076923077, - "acc,exam_id__UNICAMP_2021_2": 0.6078431372549019, - "acc,exam_id__UNICAMP_2024": 0.5555555555555556, - "acc,exam_id__USP_2019": 0.575, - "acc,exam_id__USP_2020": 0.42857142857142855, - "acc,exam_id__UNICAMP_2022": 0.5641025641025641, - "acc,exam_id__UNICAMP_2020": 0.6181818181818182, - "acc,exam_id__USP_2022": 0.5510204081632653, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6850944716585025, - "acc,exam_id__2014": 0.6880733944954128, - "acc,exam_id__2017": 0.6810344827586207, - "acc,exam_id__2023": 0.6370370370370371, - "acc,exam_id__2012": 0.7413793103448276, - "acc,exam_id__2022": 0.6165413533834586, - "acc,exam_id__2011": 0.7435897435897436, - "acc,exam_id__2016_2": 0.6666666666666666, - "acc,exam_id__2009": 0.6347826086956522, - "acc,exam_id__2010": 0.7264957264957265, - "acc,exam_id__2016": 0.6694214876033058, - "acc,exam_id__2015": 0.7478991596638656, - "acc,exam_id__2013": 0.6851851851851852 - }, - "faquad_nli": { - "f1_macro,all": 0.756614128081815, - "acc,all": 0.803076923076923, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7869372225745086, - "acc,all": 0.7942857142857143 - }, - "oab_exams": { - "acc,all": 0.4560364464692483, - "acc,exam_id__2011-05": 0.475, - "acc,exam_id__2015-16": 0.45, - "acc,exam_id__2011-03": 0.40404040404040403, - "acc,exam_id__2012-09": 0.36363636363636365, - "acc,exam_id__2016-20": 0.45, - "acc,exam_id__2014-15": 0.5256410256410257, - "acc,exam_id__2013-12": 0.45, - "acc,exam_id__2012-06": 0.45, - "acc,exam_id__2016-19": 0.46153846153846156, - "acc,exam_id__2016-21": 0.3875, - "acc,exam_id__2014-13": 0.375, - "acc,exam_id__2013-10": 0.5375, - "acc,exam_id__2015-18": 0.5125, - "acc,exam_id__2018-25": 0.4375, - "acc,exam_id__2017-23": 0.475, - "acc,exam_id__2017-22": 0.55, - "acc,exam_id__2017-24": 0.5, - "acc,exam_id__2010-01": 0.29411764705882354, - "acc,exam_id__2014-14": 0.575, - "acc,exam_id__2012-06a": 0.4375, - "acc,exam_id__2012-08": 0.5125, - "acc,exam_id__2016-20a": 0.45, - "acc,exam_id__2012-07": 0.3375, - "acc,exam_id__2010-02": 0.44, - "acc,exam_id__2015-17": 0.5769230769230769, - "acc,exam_id__2011-04": 0.4125, - "acc,exam_id__2013-11": 0.5, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6997340136792964, - "acc,all": 0.7250293772032902 - }, - "tweetsentbr": { 
- "f1_macro,all": 0.5314758113730967, - "acc,all": 0.7308457711442786, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9293286384185251, + "acc,all": 0.9293300653594772, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.8165192082173185, + "mse,all": 0.498736368872549, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5841446453407511, + "acc,exam_id__UNICAMP_2021_1": 0.6086956521739131, + "acc,exam_id__USP_2024": 0.8048780487804879, + "acc,exam_id__UNICAMP_2019": 0.6, + "acc,exam_id__UNICAMP_2023": 0.6976744186046512, + "acc,exam_id__USP_2018": 0.5, + "acc,exam_id__USP_2023": 0.6590909090909091, + "acc,exam_id__UNICAMP_2018": 0.5185185185185185, + "acc,exam_id__USP_2021": 0.5576923076923077, + "acc,exam_id__UNICAMP_2021_2": 0.6078431372549019, + "acc,exam_id__UNICAMP_2024": 0.5555555555555556, + "acc,exam_id__USP_2019": 0.575, + "acc,exam_id__USP_2020": 0.42857142857142855, + "acc,exam_id__UNICAMP_2022": 0.5641025641025641, + "acc,exam_id__UNICAMP_2020": 0.6181818181818182, + "acc,exam_id__USP_2022": 0.5510204081632653, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6850944716585025, + "acc,exam_id__2014": 0.6880733944954128, + "acc,exam_id__2017": 0.6810344827586207, + "acc,exam_id__2023": 0.6370370370370371, + "acc,exam_id__2012": 0.7413793103448276, + "acc,exam_id__2022": 0.6165413533834586, + "acc,exam_id__2011": 0.7435897435897436, + "acc,exam_id__2016_2": 0.6666666666666666, + "acc,exam_id__2009": 0.6347826086956522, + "acc,exam_id__2010": 0.7264957264957265, + "acc,exam_id__2016": 0.6694214876033058, + "acc,exam_id__2015": 0.7478991596638656, + "acc,exam_id__2013": 0.6851851851851852 + }, + "faquad_nli": { + "f1_macro,all": 0.756614128081815, + "acc,all": 0.803076923076923, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7869372225745086, + "acc,all": 0.7942857142857143 + }, + "oab_exams": { + "acc,all": 0.4560364464692483, + "acc,exam_id__2011-05": 0.475, + "acc,exam_id__2015-16": 0.45, + "acc,exam_id__2011-03": 0.40404040404040403, + "acc,exam_id__2012-09": 0.36363636363636365, + "acc,exam_id__2016-20": 0.45, + "acc,exam_id__2014-15": 
0.5256410256410257, + "acc,exam_id__2013-12": 0.45, + "acc,exam_id__2012-06": 0.45, + "acc,exam_id__2016-19": 0.46153846153846156, + "acc,exam_id__2016-21": 0.3875, + "acc,exam_id__2014-13": 0.375, + "acc,exam_id__2013-10": 0.5375, + "acc,exam_id__2015-18": 0.5125, + "acc,exam_id__2018-25": 0.4375, + "acc,exam_id__2017-23": 0.475, + "acc,exam_id__2017-22": 0.55, + "acc,exam_id__2017-24": 0.5, + "acc,exam_id__2010-01": 0.29411764705882354, + "acc,exam_id__2014-14": 0.575, + "acc,exam_id__2012-06a": 0.4375, + "acc,exam_id__2012-08": 0.5125, + "acc,exam_id__2016-20a": 0.45, + "acc,exam_id__2012-07": 0.3375, + "acc,exam_id__2010-02": 0.44, + "acc,exam_id__2015-17": 0.5769230769230769, + "acc,exam_id__2011-04": 0.4125, + "acc,exam_id__2013-11": 0.5, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6997340136792964, + "acc,all": 0.7250293772032902 + }, + "tweetsentbr": { + "f1_macro,all": 0.708634415164129, + "acc,all": 0.7308457711442786, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "39a1c76ddb5fa3a82c5b4071121d2e4866a25300", - "model_dtype": "torch.float16", - "model_memory_footprint": 21563723776, - "model_num_parameters": 10731524096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 4, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1502.7455065359477, - "min_seq_length": 1479, - "max_seq_length": 1569, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1711.7455065359477, - "min_seq_length": 1688, - "max_seq_length": 1778, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1756.9262865090404, - "min_seq_length": 1380, - "max_seq_length": 2557, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1657.039188243527, - "min_seq_length": 1391, - "max_seq_length": 2655, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1727.9876923076922, - "min_seq_length": 1672, - "max_seq_length": 1848, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "39a1c76ddb5fa3a82c5b4071121d2e4866a25300", + "model_dtype": "torch.float16", + "model_memory_footprint": 21563723776, + "model_num_parameters": 10731524096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 4, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1518.3878571428572, - "min_seq_length": 1495, - "max_seq_length": 1769, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1402.764464692483, - "min_seq_length": 1136, - "max_seq_length": 1905, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1502.7455065359477, + "min_seq_length": 1479, + "max_seq_length": 1569, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1711.7455065359477, + "min_seq_length": 1688, + "max_seq_length": 1778, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1756.9262865090404, + "min_seq_length": 1380, + "max_seq_length": 2557, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1657.039188243527, + "min_seq_length": 1391, + "max_seq_length": 
2655, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1727.9876923076922, + "min_seq_length": 1672, + "max_seq_length": 1848, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1518.3878571428572, + "min_seq_length": 1495, + "max_seq_length": 1769, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1402.764464692483, + "min_seq_length": 1136, + "max_seq_length": 1905, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2019.3360752056403, + "min_seq_length": 1984, + "max_seq_length": 2058, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1765.2492537313433, + "min_seq_length": 1744, + "max_seq_length": 1860, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2019.3360752056403, - "min_seq_length": 1984, - "max_seq_length": 2058, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=invalid-coder/Sakura-SOLAR-Instruct-CarbonVillain-en-10.7B-v2-slerp,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1765.2492537313433, - "min_seq_length": 1744, - "max_seq_length": 1860, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=invalid-coder/Sakura-SOLAR-Instruct-CarbonVillain-en-10.7B-v2-slerp,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, 
- null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/invalid-coder/Sakura-SOLAR-Instruct-CarbonVillain-en-10.7B-v2-slerp/results_2024-05-19T12-51-48.449854.json b/invalid-coder/Sakura-SOLAR-Instruct-CarbonVillain-en-10.7B-v2-slerp/results_2024-05-19T12-51-48.449854.json index b79d6cf3750cf0e1c1d0cfa14280fce4dc829241..cc979bf28586dc92a38a614da31a96e5fa03b48b 100644 --- a/invalid-coder/Sakura-SOLAR-Instruct-CarbonVillain-en-10.7B-v2-slerp/results_2024-05-19T12-51-48.449854.json +++ b/invalid-coder/Sakura-SOLAR-Instruct-CarbonVillain-en-10.7B-v2-slerp/results_2024-05-19T12-51-48.449854.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6939871762014515, - "all_grouped_npm": 0.5414012604587978, + "all_grouped_average": 0.7136714655115661, + "all_grouped_npm": 0.5706933576464684, "all_grouped": { "enem_challenge": 0.6850944716585025, "bluex": 0.5841446453407511, @@ -45,7 +45,7 @@ "faquad_nli": 0.756614128081815, "hatebr_offensive": 0.7869372225745086, "portuguese_hate_speech": 0.6997340136792964, - "tweetsentbr": 0.5314758113730967 + "tweetsentbr": 0.708634415164129 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6850944716585025, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.756614128081815, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7869372225745086, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6997340136792964, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5314758113730967 + "harness|tweetsentbr|tweetsentbr|None|25": 0.708634415164129 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6850944716585025, @@ -150,9 +150,9 @@ "main_score": 0.6997340136792964 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5314758113730967, + "f1_macro,all": 0.708634415164129, "acc,all": 0.7308457711442786, - "main_score": 0.5314758113730967 + "main_score": 0.708634415164129 } }, "config_tasks": { diff --git a/jeonsworld/CarbonVillain-en-10.7B-v4/raw_2024-05-07T13-32-38.659974/results.json b/jeonsworld/CarbonVillain-en-10.7B-v4/raw_2024-05-07T13-32-38.659974/results.json index be36e40139647adf5c03685c52f8a3b9507feec8..78e1aadcdeb65e0315321c9b5bcf321030826b7b 100644 --- a/jeonsworld/CarbonVillain-en-10.7B-v4/raw_2024-05-07T13-32-38.659974/results.json +++ b/jeonsworld/CarbonVillain-en-10.7B-v4/raw_2024-05-07T13-32-38.659974/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9305548139050568, - "acc,all": 0.9305555555555556, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.8159105813521528, - "mse,all": 0.5000937344771242, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5799721835883171, - "acc,exam_id__UNICAMP_2023": 0.6744186046511628, - "acc,exam_id__UNICAMP_2024": 0.5555555555555556, - "acc,exam_id__UNICAMP_2019": 0.6, - "acc,exam_id__USP_2019": 0.6, - "acc,exam_id__UNICAMP_2020": 0.5818181818181818, - "acc,exam_id__USP_2022": 0.5510204081632653, - "acc,exam_id__UNICAMP_2021_1": 0.6086956521739131, - "acc,exam_id__UNICAMP_2021_2": 0.6078431372549019, - "acc,exam_id__USP_2024": 0.7804878048780488, - "acc,exam_id__UNICAMP_2022": 0.5641025641025641, - "acc,exam_id__USP_2020": 0.42857142857142855, - "acc,exam_id__USP_2018": 0.5, - "acc,exam_id__USP_2021": 0.5384615384615384, - "acc,exam_id__USP_2023": 0.6818181818181818, - "acc,exam_id__UNICAMP_2018": 0.5185185185185185, 
- "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6815955213435969, - "acc,exam_id__2023": 0.6296296296296297, - "acc,exam_id__2009": 0.6347826086956522, - "acc,exam_id__2014": 0.6788990825688074, - "acc,exam_id__2022": 0.6165413533834586, - "acc,exam_id__2015": 0.7310924369747899, - "acc,exam_id__2017": 0.6724137931034483, - "acc,exam_id__2016": 0.6776859504132231, - "acc,exam_id__2011": 0.7435897435897436, - "acc,exam_id__2013": 0.6759259259259259, - "acc,exam_id__2010": 0.7264957264957265, - "acc,exam_id__2016_2": 0.6747967479674797, - "acc,exam_id__2012": 0.7327586206896551 - }, - "faquad_nli": { - "f1_macro,all": 0.7604170323305367, - "acc,all": 0.8061538461538461, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7901253336035945, - "acc,all": 0.7971428571428572 - }, - "oab_exams": { - "acc,all": 0.4533029612756264, - "acc,exam_id__2010-01": 0.2823529411764706, - "acc,exam_id__2017-24": 0.475, - "acc,exam_id__2012-09": 0.36363636363636365, - "acc,exam_id__2012-06a": 0.4375, - "acc,exam_id__2012-06": 0.45, - "acc,exam_id__2010-02": 0.44, - "acc,exam_id__2016-19": 0.47435897435897434, - "acc,exam_id__2014-14": 0.5625, - "acc,exam_id__2014-13": 0.375, - "acc,exam_id__2015-17": 0.5641025641025641, - "acc,exam_id__2014-15": 0.5128205128205128, - "acc,exam_id__2012-07": 0.35, - "acc,exam_id__2012-08": 0.5, - "acc,exam_id__2015-18": 0.525, - "acc,exam_id__2015-16": 0.475, - "acc,exam_id__2011-05": 0.4625, - "acc,exam_id__2011-03": 0.40404040404040403, - "acc,exam_id__2016-20": 0.4625, - "acc,exam_id__2013-10": 0.5, - "acc,exam_id__2016-20a": 0.45, - "acc,exam_id__2011-04": 0.425, - "acc,exam_id__2017-23": 0.475, - "acc,exam_id__2013-11": 0.5, - "acc,exam_id__2013-12": 0.4375, - "acc,exam_id__2018-25": 0.4375, - "acc,exam_id__2017-22": 0.525, - "acc,exam_id__2016-21": 0.4, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6982255842413652, - "acc,all": 0.7238542890716804 - }, - "tweetsentbr": { - "f1_macro,all": 0.5311443682198032, - "acc,all": 0.7308457711442786, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9305548139050568, + "acc,all": 0.9305555555555556, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.8159105813521528, + "mse,all": 0.5000937344771242, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5799721835883171, + "acc,exam_id__UNICAMP_2023": 0.6744186046511628, + "acc,exam_id__UNICAMP_2024": 0.5555555555555556, + "acc,exam_id__UNICAMP_2019": 0.6, + "acc,exam_id__USP_2019": 0.6, + "acc,exam_id__UNICAMP_2020": 0.5818181818181818, + "acc,exam_id__USP_2022": 0.5510204081632653, + "acc,exam_id__UNICAMP_2021_1": 0.6086956521739131, + "acc,exam_id__UNICAMP_2021_2": 0.6078431372549019, + "acc,exam_id__USP_2024": 0.7804878048780488, + "acc,exam_id__UNICAMP_2022": 0.5641025641025641, + "acc,exam_id__USP_2020": 0.42857142857142855, + "acc,exam_id__USP_2018": 0.5, + "acc,exam_id__USP_2021": 0.5384615384615384, + "acc,exam_id__USP_2023": 0.6818181818181818, + "acc,exam_id__UNICAMP_2018": 0.5185185185185185, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6815955213435969, + "acc,exam_id__2023": 0.6296296296296297, + "acc,exam_id__2009": 0.6347826086956522, + "acc,exam_id__2014": 0.6788990825688074, + "acc,exam_id__2022": 0.6165413533834586, + "acc,exam_id__2015": 0.7310924369747899, + "acc,exam_id__2017": 0.6724137931034483, + "acc,exam_id__2016": 0.6776859504132231, + "acc,exam_id__2011": 0.7435897435897436, + "acc,exam_id__2013": 0.6759259259259259, + "acc,exam_id__2010": 0.7264957264957265, + "acc,exam_id__2016_2": 0.6747967479674797, + "acc,exam_id__2012": 0.7327586206896551 + }, + "faquad_nli": { + "f1_macro,all": 0.7604170323305367, + "acc,all": 0.8061538461538461, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7901253336035945, + "acc,all": 0.7971428571428572 + }, + "oab_exams": { + "acc,all": 0.4533029612756264, + "acc,exam_id__2010-01": 0.2823529411764706, + "acc,exam_id__2017-24": 0.475, + "acc,exam_id__2012-09": 0.36363636363636365, + "acc,exam_id__2012-06a": 0.4375, + "acc,exam_id__2012-06": 0.45, + "acc,exam_id__2010-02": 0.44, + "acc,exam_id__2016-19": 0.47435897435897434, + "acc,exam_id__2014-14": 0.5625, + "acc,exam_id__2014-13": 0.375, + "acc,exam_id__2015-17": 0.5641025641025641, + "acc,exam_id__2014-15": 0.5128205128205128, + "acc,exam_id__2012-07": 0.35, + "acc,exam_id__2012-08": 0.5, + "acc,exam_id__2015-18": 0.525, + "acc,exam_id__2015-16": 0.475, + "acc,exam_id__2011-05": 0.4625, + "acc,exam_id__2011-03": 0.40404040404040403, + "acc,exam_id__2016-20": 0.4625, + "acc,exam_id__2013-10": 0.5, + "acc,exam_id__2016-20a": 0.45, + "acc,exam_id__2011-04": 0.425, + 
"acc,exam_id__2017-23": 0.475, + "acc,exam_id__2013-11": 0.5, + "acc,exam_id__2013-12": 0.4375, + "acc,exam_id__2018-25": 0.4375, + "acc,exam_id__2017-22": 0.525, + "acc,exam_id__2016-21": 0.4, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6982255842413652, + "acc,all": 0.7238542890716804 + }, + "tweetsentbr": { + "f1_macro,all": 0.7081924909597376, + "acc,all": 0.7308457711442786, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "57d6ad4d705d336aba228356683d9f221507440a", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 21563723776, - "model_num_parameters": 10731524096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 4, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1502.7455065359477, - "min_seq_length": 1479, - "max_seq_length": 1569, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1711.7455065359477, - "min_seq_length": 1688, - "max_seq_length": 1778, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1756.9262865090404, - "min_seq_length": 1380, - "max_seq_length": 2557, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1657.039188243527, - "min_seq_length": 1391, - "max_seq_length": 2655, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1727.9876923076922, - "min_seq_length": 1672, - "max_seq_length": 1848, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "57d6ad4d705d336aba228356683d9f221507440a", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 21563723776, + "model_num_parameters": 10731524096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 4, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1518.3878571428572, - "min_seq_length": 1495, - "max_seq_length": 1769, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1402.764464692483, - "min_seq_length": 1136, - "max_seq_length": 1905, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1502.7455065359477, + "min_seq_length": 1479, + "max_seq_length": 1569, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1711.7455065359477, + "min_seq_length": 1688, + "max_seq_length": 1778, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1756.9262865090404, + "min_seq_length": 1380, + "max_seq_length": 2557, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1657.039188243527, + "min_seq_length": 1391, + 
"max_seq_length": 2655, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1727.9876923076922, + "min_seq_length": 1672, + "max_seq_length": 1848, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1518.3878571428572, + "min_seq_length": 1495, + "max_seq_length": 1769, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1402.764464692483, + "min_seq_length": 1136, + "max_seq_length": 1905, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2019.3360752056403, + "min_seq_length": 1984, + "max_seq_length": 2058, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1765.2492537313433, + "min_seq_length": 1744, + "max_seq_length": 1860, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2019.3360752056403, - "min_seq_length": 1984, - "max_seq_length": 2058, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=jeonsworld/CarbonVillain-en-10.7B-v4,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1765.2492537313433, - "min_seq_length": 1744, - "max_seq_length": 1860, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=jeonsworld/CarbonVillain-en-10.7B-v4,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - 
null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/jeonsworld/CarbonVillain-en-10.7B-v4/results_2024-05-07T13-32-38.659974.json b/jeonsworld/CarbonVillain-en-10.7B-v4/results_2024-05-07T13-32-38.659974.json index dfdafcbd3c78209454c37ca0de63977348ef2117..f0304b2af31280457530a259d2adf6e33f6ddc2c 100644 --- a/jeonsworld/CarbonVillain-en-10.7B-v4/results_2024-05-07T13-32-38.659974.json +++ b/jeonsworld/CarbonVillain-en-10.7B-v4/results_2024-05-07T13-32-38.659974.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6934720422066722, - "all_grouped_npm": 0.5412256988505026, + "all_grouped_average": 0.7131440558444426, + "all_grouped_npm": 0.5704995286686136, "all_grouped": { "enem_challenge": 0.6815955213435969, "bluex": 0.5799721835883171, @@ -45,7 +45,7 @@ "faquad_nli": 0.7604170323305367, "hatebr_offensive": 0.7901253336035945, "portuguese_hate_speech": 0.6982255842413652, - "tweetsentbr": 0.5311443682198032 + "tweetsentbr": 0.7081924909597376 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6815955213435969, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7604170323305367, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7901253336035945, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6982255842413652, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5311443682198032 + "harness|tweetsentbr|tweetsentbr|None|25": 0.7081924909597376 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6815955213435969, @@ -150,9 +150,9 @@ "main_score": 0.6982255842413652 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5311443682198032, + "f1_macro,all": 0.7081924909597376, "acc,all": 0.7308457711442786, - "main_score": 0.5311443682198032 + "main_score": 0.7081924909597376 } }, "config_tasks": { diff --git a/jpacifico/Chocolatine-14B-Instruct-4k-DPO/raw_2024-09-01T04-36-58.986732/results.json b/jpacifico/Chocolatine-14B-Instruct-4k-DPO/raw_2024-09-01T04-36-58.986732/results.json index d5c7010019c2c407c835232d2a73153d4ce7db1f..c43e0bd76e1bedeea91bfd45fc49e49ed547442c 100644 --- a/jpacifico/Chocolatine-14B-Instruct-4k-DPO/raw_2024-09-01T04-36-58.986732/results.json +++ b/jpacifico/Chocolatine-14B-Instruct-4k-DPO/raw_2024-09-01T04-36-58.986732/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9309026075700955, - "acc,all": 0.9309640522875817, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7151811232953084, - "mse,all": 0.7454983660130718, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.6216968011126565, - "acc,exam_id__USP_2018": 0.5555555555555556, - "acc,exam_id__UNICAMP_2024": 0.6222222222222222, - "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, - "acc,exam_id__UNICAMP_2020": 0.6181818181818182, - "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, - "acc,exam_id__USP_2021": 0.6346153846153846, - "acc,exam_id__USP_2023": 0.7272727272727273, - "acc,exam_id__USP_2020": 0.6428571428571429, - "acc,exam_id__USP_2024": 0.7560975609756098, - "acc,exam_id__USP_2019": 0.575, - "acc,exam_id__UNICAMP_2018": 0.5555555555555556, - "acc,exam_id__UNICAMP_2022": 0.6666666666666666, - "acc,exam_id__UNICAMP_2023": 0.6511627906976745, - "acc,exam_id__UNICAMP_2019": 0.62, - "acc,exam_id__USP_2022": 0.6326530612244898, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.722183344996501, - "acc,exam_id__2012": 
0.7758620689655172, - "acc,exam_id__2015": 0.7310924369747899, - "acc,exam_id__2011": 0.811965811965812, - "acc,exam_id__2022": 0.6541353383458647, - "acc,exam_id__2009": 0.6695652173913044, - "acc,exam_id__2016_2": 0.7235772357723578, - "acc,exam_id__2013": 0.6944444444444444, - "acc,exam_id__2014": 0.7247706422018348, - "acc,exam_id__2016": 0.6859504132231405, - "acc,exam_id__2023": 0.7703703703703704, - "acc,exam_id__2017": 0.6810344827586207, - "acc,exam_id__2010": 0.7435897435897436 - }, - "faquad_nli": { - "f1_macro,all": 0.6798499329309189, - "acc,all": 0.7107692307692308, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8574935502655591, - "acc,all": 0.8592857142857143 - }, - "oab_exams": { - "acc,all": 0.5175398633257403, - "acc,exam_id__2012-06a": 0.5625, - "acc,exam_id__2018-25": 0.4625, - "acc,exam_id__2011-05": 0.6375, - "acc,exam_id__2011-04": 0.4875, - "acc,exam_id__2012-07": 0.55, - "acc,exam_id__2010-01": 0.35294117647058826, - "acc,exam_id__2014-13": 0.4625, - "acc,exam_id__2015-16": 0.5125, - "acc,exam_id__2017-23": 0.5, - "acc,exam_id__2016-21": 0.4875, - "acc,exam_id__2012-06": 0.5625, - "acc,exam_id__2017-24": 0.525, - "acc,exam_id__2012-08": 0.6125, - "acc,exam_id__2015-17": 0.5897435897435898, - "acc,exam_id__2016-20a": 0.4625, - "acc,exam_id__2013-12": 0.5375, - "acc,exam_id__2013-11": 0.5375, - "acc,exam_id__2015-18": 0.5125, - "acc,exam_id__2014-14": 0.525, - "acc,exam_id__2016-20": 0.5125, - "acc,exam_id__2012-09": 0.44155844155844154, - "acc,exam_id__2010-02": 0.53, - "acc,exam_id__2014-15": 0.6025641025641025, - "acc,exam_id__2011-03": 0.48484848484848486, - "acc,exam_id__2013-10": 0.5, - "acc,exam_id__2017-22": 0.55, - "acc,exam_id__2016-19": 0.48717948717948717, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7456087906650724, - "acc,all": 0.7955346650998825 - }, - "tweetsentbr": { - "f1_macro,all": 0.49648073415134397, - "acc,all": 0.7034825870646766, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9309026075700955, + "acc,all": 0.9309640522875817, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7151811232953084, + "mse,all": 0.7454983660130718, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.6216968011126565, + "acc,exam_id__USP_2018": 0.5555555555555556, + "acc,exam_id__UNICAMP_2024": 0.6222222222222222, + "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, + "acc,exam_id__UNICAMP_2020": 0.6181818181818182, + "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, + "acc,exam_id__USP_2021": 0.6346153846153846, + "acc,exam_id__USP_2023": 0.7272727272727273, + "acc,exam_id__USP_2020": 0.6428571428571429, + "acc,exam_id__USP_2024": 0.7560975609756098, + "acc,exam_id__USP_2019": 0.575, + "acc,exam_id__UNICAMP_2018": 0.5555555555555556, + "acc,exam_id__UNICAMP_2022": 0.6666666666666666, + "acc,exam_id__UNICAMP_2023": 0.6511627906976745, + "acc,exam_id__UNICAMP_2019": 0.62, + "acc,exam_id__USP_2022": 0.6326530612244898, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.722183344996501, + "acc,exam_id__2012": 0.7758620689655172, + "acc,exam_id__2015": 0.7310924369747899, + "acc,exam_id__2011": 0.811965811965812, + "acc,exam_id__2022": 0.6541353383458647, + "acc,exam_id__2009": 0.6695652173913044, + "acc,exam_id__2016_2": 0.7235772357723578, + "acc,exam_id__2013": 0.6944444444444444, + "acc,exam_id__2014": 0.7247706422018348, + "acc,exam_id__2016": 0.6859504132231405, + "acc,exam_id__2023": 0.7703703703703704, + "acc,exam_id__2017": 0.6810344827586207, + "acc,exam_id__2010": 0.7435897435897436 + }, + "faquad_nli": { + "f1_macro,all": 0.6798499329309189, + "acc,all": 0.7107692307692308, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8574935502655591, + "acc,all": 0.8592857142857143 + }, + "oab_exams": { + "acc,all": 0.5175398633257403, + "acc,exam_id__2012-06a": 0.5625, + "acc,exam_id__2018-25": 0.4625, + "acc,exam_id__2011-05": 0.6375, + "acc,exam_id__2011-04": 0.4875, + "acc,exam_id__2012-07": 0.55, + "acc,exam_id__2010-01": 0.35294117647058826, + "acc,exam_id__2014-13": 0.4625, + "acc,exam_id__2015-16": 0.5125, + "acc,exam_id__2017-23": 0.5, + "acc,exam_id__2016-21": 0.4875, + "acc,exam_id__2012-06": 0.5625, + "acc,exam_id__2017-24": 0.525, + "acc,exam_id__2012-08": 0.6125, + "acc,exam_id__2015-17": 0.5897435897435898, + "acc,exam_id__2016-20a": 0.4625, + "acc,exam_id__2013-12": 0.5375, + "acc,exam_id__2013-11": 0.5375, + "acc,exam_id__2015-18": 0.5125, + "acc,exam_id__2014-14": 0.525, + "acc,exam_id__2016-20": 0.5125, + "acc,exam_id__2012-09": 0.44155844155844154, + 
"acc,exam_id__2010-02": 0.53, + "acc,exam_id__2014-15": 0.6025641025641025, + "acc,exam_id__2011-03": 0.48484848484848486, + "acc,exam_id__2013-10": 0.5, + "acc,exam_id__2017-22": 0.55, + "acc,exam_id__2016-19": 0.48717948717948717, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7456087906650724, + "acc,all": 0.7955346650998825 + }, + "tweetsentbr": { + "f1_macro,all": 0.6619743122017919, + "acc,all": 0.7034825870646766, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "00c1adf08703ba23ebf70296de66d8bdaec5bb92", - "model_dtype": "torch.float16", - "model_memory_footprint": 27920476160, - "model_num_parameters": 13960238080, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1268.9889705882354, - "min_seq_length": 1246, - "max_seq_length": 1335, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1499.9889705882354, - "min_seq_length": 1477, - "max_seq_length": 1566, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1609.7426981919332, - "min_seq_length": 1243, - "max_seq_length": 2369, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1488.9881035689293, - "min_seq_length": 1236, - "max_seq_length": 2528, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1478.1184615384616, - "min_seq_length": 1426, - "max_seq_length": 1585, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "00c1adf08703ba23ebf70296de66d8bdaec5bb92", + "model_dtype": "torch.float16", + "model_memory_footprint": 27920476160, + "model_num_parameters": 13960238080, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1262.9178571428572, - "min_seq_length": 1239, - "max_seq_length": 1509, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1258.4145785876992, - "min_seq_length": 1003, - "max_seq_length": 1740, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1268.9889705882354, + "min_seq_length": 1246, + "max_seq_length": 1335, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1499.9889705882354, + "min_seq_length": 1477, + "max_seq_length": 1566, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1609.7426981919332, + "min_seq_length": 1243, + "max_seq_length": 2369, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1488.9881035689293, + "min_seq_length": 1236, + "max_seq_length": 2528, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1478.1184615384616, + "min_seq_length": 1426, + "max_seq_length": 1585, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1262.9178571428572, + "min_seq_length": 1239, + "max_seq_length": 1509, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1258.4145785876992, + "min_seq_length": 1003, + "max_seq_length": 1740, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1751.801410105758, + "min_seq_length": 1717, + "max_seq_length": 1795, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1518.6845771144278, + "min_seq_length": 1497, + "max_seq_length": 1636, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1751.801410105758, - "min_seq_length": 1717, - "max_seq_length": 1795, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=jpacifico/Chocolatine-14B-Instruct-4k-DPO,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1518.6845771144278, - "min_seq_length": 1497, - "max_seq_length": 1636, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=jpacifico/Chocolatine-14B-Instruct-4k-DPO,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": 
null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/jpacifico/Chocolatine-14B-Instruct-4k-DPO/results_2024-09-01T04-36-58.986732.json b/jpacifico/Chocolatine-14B-Instruct-4k-DPO/results_2024-09-01T04-36-58.986732.json index a72a1346e2b2952429a34163043ab821ca1630e5..edd81a8975e04929684343c334d713720f0d316d 100644 --- a/jpacifico/Chocolatine-14B-Instruct-4k-DPO/results_2024-09-01T04-36-58.986732.json +++ b/jpacifico/Chocolatine-14B-Instruct-4k-DPO/results_2024-09-01T04-36-58.986732.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6985485275903552, - "all_grouped_npm": 0.5541354032958618, + "all_grouped_average": 0.7169367029292939, + "all_grouped_npm": 0.5814987594549967, "all_grouped": { "enem_challenge": 0.722183344996501, "bluex": 0.6216968011126565, @@ -45,7 +45,7 @@ "faquad_nli": 0.6798499329309189, "hatebr_offensive": 0.8574935502655591, "portuguese_hate_speech": 0.7456087906650724, - "tweetsentbr": 0.49648073415134397 + "tweetsentbr": 0.6619743122017919 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.722183344996501, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.6798499329309189, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8574935502655591, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7456087906650724, - "harness|tweetsentbr|tweetsentbr|None|25": 0.49648073415134397 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6619743122017919 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.722183344996501, @@ -150,9 +150,9 @@ "main_score": 0.7456087906650724 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.49648073415134397, + "f1_macro,all": 0.6619743122017919, "acc,all": 0.7034825870646766, - "main_score": 0.49648073415134397 + "main_score": 0.6619743122017919 } }, "config_tasks": { diff --git a/jsfs11/MixtureofMerges-MoE-4x7b-v4/raw_2024-05-19T03-46-55.771244/results.json b/jsfs11/MixtureofMerges-MoE-4x7b-v4/raw_2024-05-19T03-46-55.771244/results.json index b6b6d026d18ac0e570e71bf6ce9852b82f4dcca8..eb002e5f19f160fd76c4652676be66cabd72c44c 100644 --- a/jsfs11/MixtureofMerges-MoE-4x7b-v4/raw_2024-05-19T03-46-55.771244/results.json +++ b/jsfs11/MixtureofMerges-MoE-4x7b-v4/raw_2024-05-19T03-46-55.771244/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9223851546589933, - "acc,all": 0.9223856209150327, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7765615051221151, - "mse,all": 0.4353669934640523, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.541029207232267, - "acc,exam_id__UNICAMP_2018": 0.5555555555555556, - "acc,exam_id__USP_2023": 0.5909090909090909, - "acc,exam_id__USP_2022": 0.4897959183673469, - "acc,exam_id__USP_2019": 0.425, - "acc,exam_id__UNICAMP_2019": 0.54, - "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, - "acc,exam_id__UNICAMP_2023": 0.627906976744186, - "acc,exam_id__USP_2018": 0.4444444444444444, - "acc,exam_id__UNICAMP_2022": 0.6153846153846154, - "acc,exam_id__USP_2020": 0.5178571428571429, - "acc,exam_id__USP_2024": 0.7560975609756098, - "acc,exam_id__UNICAMP_2024": 0.4888888888888889, - "acc,exam_id__UNICAMP_2020": 0.5636363636363636, - "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, - "acc,exam_id__USP_2021": 0.4807692307692308, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6375087473757872, - "acc,exam_id__2009": 0.6434782608695652, - "acc,exam_id__2016_2": 0.6178861788617886, - 
"acc,exam_id__2013": 0.6759259259259259, - "acc,exam_id__2016": 0.5867768595041323, - "acc,exam_id__2015": 0.6134453781512605, - "acc,exam_id__2011": 0.6752136752136753, - "acc,exam_id__2014": 0.6238532110091743, - "acc,exam_id__2017": 0.6637931034482759, - "acc,exam_id__2010": 0.7008547008547008, - "acc,exam_id__2022": 0.6090225563909775, - "acc,exam_id__2023": 0.6296296296296297, - "acc,exam_id__2012": 0.6206896551724138 - }, - "faquad_nli": { - "f1_macro,all": 0.7848580929549145, - "acc,all": 0.8353846153846154, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8337715452765142, - "acc,all": 0.8364285714285714 - }, - "oab_exams": { - "acc,all": 0.41275626423690204, - "acc,exam_id__2012-08": 0.4125, - "acc,exam_id__2015-18": 0.4, - "acc,exam_id__2013-10": 0.4125, - "acc,exam_id__2013-12": 0.4375, - "acc,exam_id__2011-03": 0.32323232323232326, - "acc,exam_id__2012-09": 0.33766233766233766, - "acc,exam_id__2012-07": 0.3625, - "acc,exam_id__2016-21": 0.4, - "acc,exam_id__2012-06": 0.4625, - "acc,exam_id__2013-11": 0.475, - "acc,exam_id__2016-19": 0.47435897435897434, - "acc,exam_id__2012-06a": 0.3625, - "acc,exam_id__2014-14": 0.5, - "acc,exam_id__2017-22": 0.55, - "acc,exam_id__2018-25": 0.45, - "acc,exam_id__2014-13": 0.325, - "acc,exam_id__2017-23": 0.4125, - "acc,exam_id__2017-24": 0.35, - "acc,exam_id__2011-04": 0.4, - "acc,exam_id__2010-02": 0.42, - "acc,exam_id__2015-17": 0.5256410256410257, - "acc,exam_id__2011-05": 0.4625, - "acc,exam_id__2014-15": 0.47435897435897434, - "acc,exam_id__2016-20": 0.3625, - "acc,exam_id__2010-01": 0.3764705882352941, - "acc,exam_id__2016-20a": 0.3375, - "acc,exam_id__2015-16": 0.3625, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6870894564309664, - "acc,all": 0.7226792009400705 - }, - "tweetsentbr": { - "f1_macro,all": 0.4903198699192593, - "acc,all": 0.7039800995024875, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9223851546589933, + "acc,all": 0.9223856209150327, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7765615051221151, + "mse,all": 0.4353669934640523, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.541029207232267, + "acc,exam_id__UNICAMP_2018": 0.5555555555555556, + "acc,exam_id__USP_2023": 0.5909090909090909, + "acc,exam_id__USP_2022": 0.4897959183673469, + "acc,exam_id__USP_2019": 0.425, + "acc,exam_id__UNICAMP_2019": 0.54, + "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, + "acc,exam_id__UNICAMP_2023": 0.627906976744186, + "acc,exam_id__USP_2018": 0.4444444444444444, + "acc,exam_id__UNICAMP_2022": 0.6153846153846154, + "acc,exam_id__USP_2020": 0.5178571428571429, + "acc,exam_id__USP_2024": 0.7560975609756098, + "acc,exam_id__UNICAMP_2024": 0.4888888888888889, + "acc,exam_id__UNICAMP_2020": 0.5636363636363636, + "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, + "acc,exam_id__USP_2021": 0.4807692307692308, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6375087473757872, + "acc,exam_id__2009": 0.6434782608695652, + "acc,exam_id__2016_2": 0.6178861788617886, + "acc,exam_id__2013": 0.6759259259259259, + "acc,exam_id__2016": 0.5867768595041323, + "acc,exam_id__2015": 0.6134453781512605, + "acc,exam_id__2011": 0.6752136752136753, + "acc,exam_id__2014": 0.6238532110091743, + "acc,exam_id__2017": 0.6637931034482759, + "acc,exam_id__2010": 0.7008547008547008, + "acc,exam_id__2022": 0.6090225563909775, + "acc,exam_id__2023": 0.6296296296296297, + "acc,exam_id__2012": 0.6206896551724138 + }, + "faquad_nli": { + "f1_macro,all": 0.7848580929549145, + "acc,all": 0.8353846153846154, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8337715452765142, + "acc,all": 0.8364285714285714 + }, + "oab_exams": { + "acc,all": 0.41275626423690204, + "acc,exam_id__2012-08": 0.4125, + "acc,exam_id__2015-18": 0.4, + "acc,exam_id__2013-10": 0.4125, + "acc,exam_id__2013-12": 0.4375, + "acc,exam_id__2011-03": 0.32323232323232326, + "acc,exam_id__2012-09": 0.33766233766233766, + "acc,exam_id__2012-07": 0.3625, + "acc,exam_id__2016-21": 0.4, + "acc,exam_id__2012-06": 0.4625, + "acc,exam_id__2013-11": 0.475, + "acc,exam_id__2016-19": 0.47435897435897434, + "acc,exam_id__2012-06a": 0.3625, + "acc,exam_id__2014-14": 0.5, + "acc,exam_id__2017-22": 0.55, + "acc,exam_id__2018-25": 0.45, + "acc,exam_id__2014-13": 0.325, + "acc,exam_id__2017-23": 0.4125, + "acc,exam_id__2017-24": 0.35, + "acc,exam_id__2011-04": 0.4, + "acc,exam_id__2010-02": 0.42, + "acc,exam_id__2015-17": 0.5256410256410257, + "acc,exam_id__2011-05": 
0.4625, + "acc,exam_id__2014-15": 0.47435897435897434, + "acc,exam_id__2016-20": 0.3625, + "acc,exam_id__2010-01": 0.3764705882352941, + "acc,exam_id__2016-20a": 0.3375, + "acc,exam_id__2015-16": 0.3625, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6870894564309664, + "acc,all": 0.7226792009400705 + }, + "tweetsentbr": { + "f1_macro,all": 0.6537598265590124, + "acc,all": 0.7039800995024875, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "2b98406f20a874184dbffb5ed24e1f4b5063ec4b", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 48844259328, - "model_num_parameters": 24153690112, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 
1620.039188243527, - "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "2b98406f20a874184dbffb5ed24e1f4b5063ec4b", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 48844259328, + "model_num_parameters": 24153690112, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=jsfs11/MixtureofMerges-MoE-4x7b-v4,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=jsfs11/MixtureofMerges-MoE-4x7b-v4,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - 
"gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/jsfs11/MixtureofMerges-MoE-4x7b-v4/results_2024-05-19T03-46-55.771244.json b/jsfs11/MixtureofMerges-MoE-4x7b-v4/results_2024-05-19T03-46-55.771244.json index 3b8b49f26eedae46b6cac40384a04a043da9b2dd..7bbf738e136218beeccee1574602f8f4ae4501c1 100644 --- a/jsfs11/MixtureofMerges-MoE-4x7b-v4/results_2024-05-19T03-46-55.771244.json +++ b/jsfs11/MixtureofMerges-MoE-4x7b-v4/results_2024-05-19T03-46-55.771244.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6762533159119689, - "all_grouped_npm": 0.5228909710610377, + "all_grouped_average": 0.6944133110941636, + "all_grouped_npm": 0.5499147734154941, "all_grouped": { "enem_challenge": 0.6375087473757872, "bluex": 0.541029207232267, @@ -45,7 +45,7 @@ "faquad_nli": 0.7848580929549145, "hatebr_offensive": 0.8337715452765142, "portuguese_hate_speech": 0.6870894564309664, - "tweetsentbr": 0.4903198699192593 + "tweetsentbr": 0.6537598265590124 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6375087473757872, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7848580929549145, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8337715452765142, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6870894564309664, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4903198699192593 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6537598265590124 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6375087473757872, @@ -150,9 +150,9 @@ "main_score": 0.6870894564309664 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4903198699192593, + "f1_macro,all": 0.6537598265590124, "acc,all": 0.7039800995024875, - "main_score": 0.4903198699192593 + "main_score": 0.6537598265590124 } }, "config_tasks": { diff --git a/jsfs11/MixtureofMerges-MoE-4x7b-v5/raw_2024-06-29T04-47-16.492324/results.json b/jsfs11/MixtureofMerges-MoE-4x7b-v5/raw_2024-06-29T04-47-16.492324/results.json index 7be65db0424fade939066a0de621b6e76de1bd7d..b421b411cf3193630eee528ebce992481f910c26 100644 --- a/jsfs11/MixtureofMerges-MoE-4x7b-v5/raw_2024-06-29T04-47-16.492324/results.json +++ b/jsfs11/MixtureofMerges-MoE-4x7b-v5/raw_2024-06-29T04-47-16.492324/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9236107924352364, - "acc,all": 0.9236111111111112, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7773330335553215, - "mse,all": 0.4334313725490196, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5479833101529903, - "acc,exam_id__UNICAMP_2018": 0.5185185185185185, - "acc,exam_id__USP_2018": 0.4444444444444444, - "acc,exam_id__USP_2021": 0.46153846153846156, - "acc,exam_id__UNICAMP_2024": 0.5555555555555556, - "acc,exam_id__USP_2022": 0.5306122448979592, - "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, - "acc,exam_id__UNICAMP_2022": 0.6410256410256411, - "acc,exam_id__USP_2019": 0.45, - "acc,exam_id__USP_2024": 0.7317073170731707, - "acc,exam_id__UNICAMP_2023": 0.627906976744186, - "acc,exam_id__USP_2020": 0.5357142857142857, - "acc,exam_id__USP_2023": 0.5909090909090909, - "acc,exam_id__UNICAMP_2019": 0.54, - "acc,exam_id__UNICAMP_2020": 0.5818181818181818, - "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6340097970608818, - "acc,exam_id__2015": 0.6050420168067226, - "acc,exam_id__2023": 0.6592592592592592, - 
"acc,exam_id__2010": 0.6752136752136753, - "acc,exam_id__2012": 0.6379310344827587, - "acc,exam_id__2013": 0.6851851851851852, - "acc,exam_id__2011": 0.6752136752136753, - "acc,exam_id__2016": 0.5867768595041323, - "acc,exam_id__2017": 0.6379310344827587, - "acc,exam_id__2014": 0.5963302752293578, - "acc,exam_id__2016_2": 0.6097560975609756, - "acc,exam_id__2009": 0.6347826086956522, - "acc,exam_id__2022": 0.6090225563909775 - }, - "faquad_nli": { - "f1_macro,all": 0.7823323438856359, - "acc,all": 0.8415384615384616, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8094954980227307, - "acc,all": 0.8142857142857143 - }, - "oab_exams": { - "acc,all": 0.42414578587699314, - "acc,exam_id__2015-16": 0.375, - "acc,exam_id__2013-12": 0.4125, - "acc,exam_id__2014-13": 0.3375, - "acc,exam_id__2010-02": 0.44, - "acc,exam_id__2014-15": 0.44871794871794873, - "acc,exam_id__2016-19": 0.5384615384615384, - "acc,exam_id__2015-17": 0.5384615384615384, - "acc,exam_id__2010-01": 0.4, - "acc,exam_id__2012-06": 0.5, - "acc,exam_id__2012-07": 0.3625, - "acc,exam_id__2017-23": 0.45, - "acc,exam_id__2015-18": 0.4, - "acc,exam_id__2017-24": 0.4, - "acc,exam_id__2018-25": 0.4625, - "acc,exam_id__2012-08": 0.4125, - "acc,exam_id__2016-20a": 0.35, - "acc,exam_id__2013-11": 0.4625, - "acc,exam_id__2011-04": 0.4125, - "acc,exam_id__2016-20": 0.375, - "acc,exam_id__2011-05": 0.4625, - "acc,exam_id__2017-22": 0.4875, - "acc,exam_id__2012-09": 0.38961038961038963, - "acc,exam_id__2014-14": 0.525, - "acc,exam_id__2012-06a": 0.3875, - "acc,exam_id__2013-10": 0.4125, - "acc,exam_id__2016-21": 0.3875, - "acc,exam_id__2011-03": 0.3434343434343434, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7039041954929806, - "acc,all": 0.7473560517038778 - }, - "tweetsentbr": { - "f1_macro,all": 0.49094641809438266, - "acc,all": 0.7049751243781095, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9236107924352364, + "acc,all": 0.9236111111111112, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7773330335553215, + "mse,all": 0.4334313725490196, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5479833101529903, + "acc,exam_id__UNICAMP_2018": 0.5185185185185185, + "acc,exam_id__USP_2018": 0.4444444444444444, + "acc,exam_id__USP_2021": 0.46153846153846156, + "acc,exam_id__UNICAMP_2024": 0.5555555555555556, + "acc,exam_id__USP_2022": 0.5306122448979592, + "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, + "acc,exam_id__UNICAMP_2022": 0.6410256410256411, + "acc,exam_id__USP_2019": 0.45, + "acc,exam_id__USP_2024": 0.7317073170731707, + "acc,exam_id__UNICAMP_2023": 0.627906976744186, + "acc,exam_id__USP_2020": 0.5357142857142857, + "acc,exam_id__USP_2023": 0.5909090909090909, + "acc,exam_id__UNICAMP_2019": 0.54, + "acc,exam_id__UNICAMP_2020": 0.5818181818181818, + "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6340097970608818, + "acc,exam_id__2015": 0.6050420168067226, + "acc,exam_id__2023": 0.6592592592592592, + "acc,exam_id__2010": 0.6752136752136753, + "acc,exam_id__2012": 0.6379310344827587, + "acc,exam_id__2013": 0.6851851851851852, + "acc,exam_id__2011": 0.6752136752136753, + "acc,exam_id__2016": 0.5867768595041323, + "acc,exam_id__2017": 0.6379310344827587, + "acc,exam_id__2014": 0.5963302752293578, + "acc,exam_id__2016_2": 0.6097560975609756, + "acc,exam_id__2009": 0.6347826086956522, + "acc,exam_id__2022": 0.6090225563909775 + }, + "faquad_nli": { + "f1_macro,all": 0.7823323438856359, + "acc,all": 0.8415384615384616, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8094954980227307, + "acc,all": 0.8142857142857143 + }, + "oab_exams": { + "acc,all": 0.42414578587699314, + "acc,exam_id__2015-16": 0.375, + "acc,exam_id__2013-12": 0.4125, + "acc,exam_id__2014-13": 0.3375, + "acc,exam_id__2010-02": 0.44, + "acc,exam_id__2014-15": 0.44871794871794873, + "acc,exam_id__2016-19": 0.5384615384615384, + "acc,exam_id__2015-17": 0.5384615384615384, + "acc,exam_id__2010-01": 0.4, + "acc,exam_id__2012-06": 0.5, + "acc,exam_id__2012-07": 0.3625, + "acc,exam_id__2017-23": 0.45, + "acc,exam_id__2015-18": 0.4, + "acc,exam_id__2017-24": 0.4, + "acc,exam_id__2018-25": 0.4625, + "acc,exam_id__2012-08": 0.4125, + "acc,exam_id__2016-20a": 0.35, + "acc,exam_id__2013-11": 0.4625, + "acc,exam_id__2011-04": 0.4125, + "acc,exam_id__2016-20": 0.375, + "acc,exam_id__2011-05": 0.4625, + "acc,exam_id__2017-22": 0.4875, + "acc,exam_id__2012-09": 
0.38961038961038963, + "acc,exam_id__2014-14": 0.525, + "acc,exam_id__2012-06a": 0.3875, + "acc,exam_id__2013-10": 0.4125, + "acc,exam_id__2016-21": 0.3875, + "acc,exam_id__2011-03": 0.3434343434343434, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7039041954929806, + "acc,all": 0.7473560517038778 + }, + "tweetsentbr": { + "f1_macro,all": 0.6545952241258436, + "acc,all": 0.7049751243781095, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "c1b5ce7144b966062df7627d2482a59e0df3757c", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 48844259328, - "model_num_parameters": 24153690112, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 
1620.039188243527, - "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "c1b5ce7144b966062df7627d2482a59e0df3757c", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 48844259328, + "model_num_parameters": 24153690112, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=jsfs11/MixtureofMerges-MoE-4x7b-v5,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=jsfs11/MixtureofMerges-MoE-4x7b-v5,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - 
"gen_kwargs": null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/jsfs11/MixtureofMerges-MoE-4x7b-v5/results_2024-06-29T04-47-16.492324.json b/jsfs11/MixtureofMerges-MoE-4x7b-v5/results_2024-06-29T04-47-16.492324.json index 4d4da49f038b369e08e561aa23d4818948685ad5..47c228563359fc382862e76d06481ad6e5fe2c04 100644 --- a/jsfs11/MixtureofMerges-MoE-4x7b-v5/results_2024-06-29T04-47-16.492324.json +++ b/jsfs11/MixtureofMerges-MoE-4x7b-v5/results_2024-06-29T04-47-16.492324.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.677084574953017, - "all_grouped_npm": 0.5232264710683194, + "all_grouped_average": 0.6952677756231793, + "all_grouped_npm": 0.5502848053989182, "all_grouped": { "enem_challenge": 0.6340097970608818, "bluex": 0.5479833101529903, @@ -45,7 +45,7 @@ "faquad_nli": 0.7823323438856359, "hatebr_offensive": 0.8094954980227307, "portuguese_hate_speech": 0.7039041954929806, - "tweetsentbr": 0.49094641809438266 + "tweetsentbr": 0.6545952241258436 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6340097970608818, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7823323438856359, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8094954980227307, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7039041954929806, - "harness|tweetsentbr|tweetsentbr|None|25": 0.49094641809438266 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6545952241258436 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6340097970608818, @@ -150,9 +150,9 @@ "main_score": 0.7039041954929806 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.49094641809438266, + "f1_macro,all": 0.6545952241258436, "acc,all": 0.7049751243781095, - "main_score": 0.49094641809438266 + "main_score": 0.6545952241258436 } }, "config_tasks": { diff --git a/kekmodel/StopCarbon-10.7B-v5/raw_2024-05-21T02-58-06.102309/results.json b/kekmodel/StopCarbon-10.7B-v5/raw_2024-05-21T02-58-06.102309/results.json index 5964fa81823200f8ab237cf5f27e2e7cdeaadc0d..709297ea21277480463388195036ae7f7e639d5f 100644 --- a/kekmodel/StopCarbon-10.7B-v5/raw_2024-05-21T02-58-06.102309/results.json +++ b/kekmodel/StopCarbon-10.7B-v5/raw_2024-05-21T02-58-06.102309/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9293286384185251, - "acc,all": 0.9293300653594772, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.8169705299837213, - "mse,all": 0.49864491625816987, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5813630041724618, - "acc,exam_id__UNICAMP_2021_2": 0.5882352941176471, - "acc,exam_id__USP_2022": 0.5510204081632653, - "acc,exam_id__UNICAMP_2019": 0.6, - "acc,exam_id__USP_2020": 0.44642857142857145, - "acc,exam_id__UNICAMP_2021_1": 0.6086956521739131, - "acc,exam_id__USP_2021": 0.5576923076923077, - "acc,exam_id__USP_2024": 0.7804878048780488, - "acc,exam_id__USP_2018": 0.5185185185185185, - "acc,exam_id__USP_2023": 0.6590909090909091, - "acc,exam_id__USP_2019": 0.55, - "acc,exam_id__UNICAMP_2022": 0.5641025641025641, - "acc,exam_id__UNICAMP_2023": 0.6976744186046512, - "acc,exam_id__UNICAMP_2020": 0.6, - "acc,exam_id__UNICAMP_2024": 0.5555555555555556, - "acc,exam_id__UNICAMP_2018": 0.5185185185185185, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6878936319104269, - "acc,exam_id__2022": 0.6390977443609023, - "acc,exam_id__2016_2": 0.6910569105691057, - "acc,exam_id__2011": 0.7435897435897436, - 
"acc,exam_id__2017": 0.6896551724137931, - "acc,exam_id__2012": 0.7413793103448276, - "acc,exam_id__2009": 0.6347826086956522, - "acc,exam_id__2010": 0.717948717948718, - "acc,exam_id__2013": 0.6759259259259259, - "acc,exam_id__2016": 0.6859504132231405, - "acc,exam_id__2014": 0.6788990825688074, - "acc,exam_id__2015": 0.7310924369747899, - "acc,exam_id__2023": 0.6370370370370371 - }, - "faquad_nli": { - "f1_macro,all": 0.7634310134310134, - "acc,all": 0.8092307692307692, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7870538006037701, - "acc,all": 0.7942857142857143 - }, - "oab_exams": { - "acc,all": 0.4619589977220957, - "acc,exam_id__2011-03": 0.42424242424242425, - "acc,exam_id__2017-23": 0.5, - "acc,exam_id__2015-17": 0.5769230769230769, - "acc,exam_id__2017-24": 0.5, - "acc,exam_id__2018-25": 0.4375, - "acc,exam_id__2014-13": 0.4, - "acc,exam_id__2013-12": 0.45, - "acc,exam_id__2014-14": 0.5625, - "acc,exam_id__2012-08": 0.525, - "acc,exam_id__2012-06": 0.45, - "acc,exam_id__2012-07": 0.3625, - "acc,exam_id__2015-18": 0.5375, - "acc,exam_id__2011-05": 0.4875, - "acc,exam_id__2012-09": 0.36363636363636365, - "acc,exam_id__2016-20a": 0.45, - "acc,exam_id__2017-22": 0.5625, - "acc,exam_id__2012-06a": 0.4375, - "acc,exam_id__2016-19": 0.46153846153846156, - "acc,exam_id__2013-11": 0.5, - "acc,exam_id__2016-20": 0.45, - "acc,exam_id__2014-15": 0.5256410256410257, - "acc,exam_id__2010-01": 0.29411764705882354, - "acc,exam_id__2011-04": 0.425, - "acc,exam_id__2013-10": 0.5125, - "acc,exam_id__2010-02": 0.46, - "acc,exam_id__2015-16": 0.45, - "acc,exam_id__2016-21": 0.3875, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7023003896307556, - "acc,all": 0.72737955346651 - }, - "tweetsentbr": { - "f1_macro,all": 0.5356193432848626, - "acc,all": 0.735820895522388, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9293286384185251, + "acc,all": 0.9293300653594772, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.8169705299837213, + "mse,all": 0.49864491625816987, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5813630041724618, + "acc,exam_id__UNICAMP_2021_2": 0.5882352941176471, + "acc,exam_id__USP_2022": 0.5510204081632653, + "acc,exam_id__UNICAMP_2019": 0.6, + "acc,exam_id__USP_2020": 0.44642857142857145, + "acc,exam_id__UNICAMP_2021_1": 0.6086956521739131, + "acc,exam_id__USP_2021": 0.5576923076923077, + "acc,exam_id__USP_2024": 0.7804878048780488, + "acc,exam_id__USP_2018": 0.5185185185185185, + "acc,exam_id__USP_2023": 0.6590909090909091, + "acc,exam_id__USP_2019": 0.55, + "acc,exam_id__UNICAMP_2022": 0.5641025641025641, + "acc,exam_id__UNICAMP_2023": 0.6976744186046512, + "acc,exam_id__UNICAMP_2020": 0.6, + "acc,exam_id__UNICAMP_2024": 0.5555555555555556, + "acc,exam_id__UNICAMP_2018": 0.5185185185185185, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6878936319104269, + "acc,exam_id__2022": 0.6390977443609023, + "acc,exam_id__2016_2": 0.6910569105691057, + "acc,exam_id__2011": 0.7435897435897436, + "acc,exam_id__2017": 0.6896551724137931, + "acc,exam_id__2012": 0.7413793103448276, + "acc,exam_id__2009": 0.6347826086956522, + "acc,exam_id__2010": 0.717948717948718, + "acc,exam_id__2013": 0.6759259259259259, + "acc,exam_id__2016": 0.6859504132231405, + "acc,exam_id__2014": 0.6788990825688074, + "acc,exam_id__2015": 0.7310924369747899, + "acc,exam_id__2023": 0.6370370370370371 + }, + "faquad_nli": { + "f1_macro,all": 0.7634310134310134, + "acc,all": 0.8092307692307692, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7870538006037701, + "acc,all": 0.7942857142857143 + }, + "oab_exams": { + "acc,all": 0.4619589977220957, + "acc,exam_id__2011-03": 0.42424242424242425, + "acc,exam_id__2017-23": 0.5, + "acc,exam_id__2015-17": 0.5769230769230769, + "acc,exam_id__2017-24": 0.5, + "acc,exam_id__2018-25": 0.4375, + "acc,exam_id__2014-13": 0.4, + "acc,exam_id__2013-12": 0.45, + "acc,exam_id__2014-14": 0.5625, + "acc,exam_id__2012-08": 0.525, + "acc,exam_id__2012-06": 0.45, + "acc,exam_id__2012-07": 0.3625, + "acc,exam_id__2015-18": 0.5375, + "acc,exam_id__2011-05": 0.4875, + "acc,exam_id__2012-09": 0.36363636363636365, + "acc,exam_id__2016-20a": 0.45, + "acc,exam_id__2017-22": 0.5625, + "acc,exam_id__2012-06a": 0.4375, + "acc,exam_id__2016-19": 0.46153846153846156, + "acc,exam_id__2013-11": 0.5, + "acc,exam_id__2016-20": 0.45, + "acc,exam_id__2014-15": 0.5256410256410257, + "acc,exam_id__2010-01": 
0.29411764705882354, + "acc,exam_id__2011-04": 0.425, + "acc,exam_id__2013-10": 0.5125, + "acc,exam_id__2010-02": 0.46, + "acc,exam_id__2015-16": 0.45, + "acc,exam_id__2016-21": 0.3875, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7023003896307556, + "acc,all": 0.72737955346651 + }, + "tweetsentbr": { + "f1_macro,all": 0.7141591243798168, + "acc,all": 0.735820895522388, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "7d59819dce2439f6c83b4f5c21a68aa882ff5ac9", - "model_dtype": "torch.float16", - "model_memory_footprint": 21463060480, - "model_num_parameters": 10731524096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1502.7455065359477, - "min_seq_length": 1479, - "max_seq_length": 1569, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1711.7455065359477, - "min_seq_length": 1688, - "max_seq_length": 1778, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1756.9262865090404, - "min_seq_length": 1380, - "max_seq_length": 2557, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1657.039188243527, - "min_seq_length": 1391, - "max_seq_length": 2655, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1727.9876923076922, - "min_seq_length": 1672, - "max_seq_length": 1848, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "7d59819dce2439f6c83b4f5c21a68aa882ff5ac9", + "model_dtype": "torch.float16", + "model_memory_footprint": 21463060480, + "model_num_parameters": 10731524096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1518.3878571428572, - "min_seq_length": 1495, - "max_seq_length": 1769, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1402.764464692483, - "min_seq_length": 1136, - "max_seq_length": 1905, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1502.7455065359477, + "min_seq_length": 1479, + "max_seq_length": 1569, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1711.7455065359477, + "min_seq_length": 1688, + "max_seq_length": 1778, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1756.9262865090404, + "min_seq_length": 1380, + "max_seq_length": 2557, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1657.039188243527, + "min_seq_length": 1391, + 
"max_seq_length": 2655, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1727.9876923076922, + "min_seq_length": 1672, + "max_seq_length": 1848, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1518.3878571428572, + "min_seq_length": 1495, + "max_seq_length": 1769, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1402.764464692483, + "min_seq_length": 1136, + "max_seq_length": 1905, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2019.3360752056403, + "min_seq_length": 1984, + "max_seq_length": 2058, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1765.2492537313433, + "min_seq_length": 1744, + "max_seq_length": 1860, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2019.3360752056403, - "min_seq_length": 1984, - "max_seq_length": 2058, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=kekmodel/StopCarbon-10.7B-v5,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1765.2492537313433, - "min_seq_length": 1744, - "max_seq_length": 1860, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=kekmodel/StopCarbon-10.7B-v5,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], 
- "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/kekmodel/StopCarbon-10.7B-v5/results_2024-05-21T02-58-06.102309.json b/kekmodel/StopCarbon-10.7B-v5/results_2024-05-21T02-58-06.102309.json index 9815d236e1438121061bf81fcbecaaea3c2c8cad..a6e5db706e15b83cb13e9ba265b5f43c3069edc4 100644 --- a/kekmodel/StopCarbon-10.7B-v5/results_2024-05-21T02-58-06.102309.json +++ b/kekmodel/StopCarbon-10.7B-v5/results_2024-05-21T02-58-06.102309.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6962132610175147, - "all_grouped_npm": 0.5449694630848425, + "all_grouped_average": 0.7160510144725096, + "all_grouped_npm": 0.5744899295357278, "all_grouped": { "enem_challenge": 0.6878936319104269, "bluex": 0.5813630041724618, @@ -45,7 +45,7 @@ "faquad_nli": 0.7634310134310134, "hatebr_offensive": 0.7870538006037701, "portuguese_hate_speech": 0.7023003896307556, - "tweetsentbr": 0.5356193432848626 + "tweetsentbr": 0.7141591243798168 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6878936319104269, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7634310134310134, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7870538006037701, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7023003896307556, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5356193432848626 + "harness|tweetsentbr|tweetsentbr|None|25": 0.7141591243798168 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6878936319104269, @@ -150,9 +150,9 @@ "main_score": 0.7023003896307556 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5356193432848626, + "f1_macro,all": 0.7141591243798168, "acc,all": 0.735820895522388, - "main_score": 0.5356193432848626 + "main_score": 0.7141591243798168 } }, "config_tasks": { diff --git a/liminerity/M7-7b/raw_2024-07-13T18-52-55.048115/results.json b/liminerity/M7-7b/raw_2024-07-13T18-52-55.048115/results.json index f0b683e17da430a03652d91c6991feba78988c82..894d4c92a0786e3768cfc54ff888a14637e31a33 100644 --- a/liminerity/M7-7b/raw_2024-07-13T18-52-55.048115/results.json +++ b/liminerity/M7-7b/raw_2024-07-13T18-52-55.048115/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9219689860564255, - "acc,all": 0.9219771241830066, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7788708607433608, - "mse,all": 0.42756127450980397, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.545201668984701, - "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, - "acc,exam_id__UNICAMP_2020": 0.6, - "acc,exam_id__USP_2022": 0.4897959183673469, - "acc,exam_id__USP_2020": 0.5178571428571429, - "acc,exam_id__USP_2024": 0.7560975609756098, - "acc,exam_id__USP_2021": 0.5, - "acc,exam_id__UNICAMP_2023": 0.6046511627906976, - "acc,exam_id__USP_2018": 0.48148148148148145, - "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373, - "acc,exam_id__UNICAMP_2024": 0.4888888888888889, - "acc,exam_id__UNICAMP_2018": 0.5185185185185185, - "acc,exam_id__UNICAMP_2022": 0.5897435897435898, - "acc,exam_id__USP_2023": 0.5909090909090909, - "acc,exam_id__USP_2019": 0.45, - "acc,exam_id__UNICAMP_2019": 0.54, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6375087473757872, - "acc,exam_id__2009": 0.6521739130434783, - "acc,exam_id__2012": 0.6206896551724138, - "acc,exam_id__2023": 0.6370370370370371, - "acc,exam_id__2010": 0.7008547008547008, - "acc,exam_id__2016_2": 
0.5853658536585366, - "acc,exam_id__2022": 0.6090225563909775, - "acc,exam_id__2017": 0.6637931034482759, - "acc,exam_id__2013": 0.6944444444444444, - "acc,exam_id__2011": 0.6666666666666666, - "acc,exam_id__2014": 0.6146788990825688, - "acc,exam_id__2015": 0.6218487394957983, - "acc,exam_id__2016": 0.5950413223140496 - }, - "faquad_nli": { - "f1_macro,all": 0.7785648031031285, - "acc,all": 0.8446153846153847, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8195498766124012, - "acc,all": 0.8235714285714286 - }, - "oab_exams": { - "acc,all": 0.41594533029612757, - "acc,exam_id__2011-03": 0.3434343434343434, - "acc,exam_id__2015-16": 0.375, - "acc,exam_id__2016-21": 0.375, - "acc,exam_id__2011-05": 0.45, - "acc,exam_id__2014-14": 0.5, - "acc,exam_id__2010-02": 0.42, - "acc,exam_id__2016-19": 0.5, - "acc,exam_id__2013-10": 0.4125, - "acc,exam_id__2014-13": 0.3375, - "acc,exam_id__2012-09": 0.33766233766233766, - "acc,exam_id__2016-20a": 0.35, - "acc,exam_id__2012-06": 0.475, - "acc,exam_id__2013-11": 0.4625, - "acc,exam_id__2012-06a": 0.35, - "acc,exam_id__2015-18": 0.4125, - "acc,exam_id__2013-12": 0.4625, - "acc,exam_id__2018-25": 0.45, - "acc,exam_id__2017-24": 0.3625, - "acc,exam_id__2014-15": 0.47435897435897434, - "acc,exam_id__2010-01": 0.36470588235294116, - "acc,exam_id__2011-04": 0.4125, - "acc,exam_id__2015-17": 0.5128205128205128, - "acc,exam_id__2016-20": 0.3625, - "acc,exam_id__2017-23": 0.45, - "acc,exam_id__2012-07": 0.35, - "acc,exam_id__2012-08": 0.4125, - "acc,exam_id__2017-22": 0.5375, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7033809689787383, - "acc,all": 0.7555816686251469 - }, - "tweetsentbr": { - "f1_macro,all": 0.4935747654260173, - "acc,all": 0.7059701492537314, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9219689860564255, + "acc,all": 0.9219771241830066, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7788708607433608, + "mse,all": 0.42756127450980397, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.545201668984701, + "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, + "acc,exam_id__UNICAMP_2020": 0.6, + "acc,exam_id__USP_2022": 0.4897959183673469, + "acc,exam_id__USP_2020": 0.5178571428571429, + "acc,exam_id__USP_2024": 0.7560975609756098, + "acc,exam_id__USP_2021": 0.5, + "acc,exam_id__UNICAMP_2023": 0.6046511627906976, + "acc,exam_id__USP_2018": 0.48148148148148145, + "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373, + "acc,exam_id__UNICAMP_2024": 0.4888888888888889, + "acc,exam_id__UNICAMP_2018": 0.5185185185185185, + "acc,exam_id__UNICAMP_2022": 0.5897435897435898, + "acc,exam_id__USP_2023": 0.5909090909090909, + "acc,exam_id__USP_2019": 0.45, + "acc,exam_id__UNICAMP_2019": 0.54, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6375087473757872, + "acc,exam_id__2009": 0.6521739130434783, + "acc,exam_id__2012": 0.6206896551724138, + "acc,exam_id__2023": 0.6370370370370371, + "acc,exam_id__2010": 0.7008547008547008, + "acc,exam_id__2016_2": 0.5853658536585366, + "acc,exam_id__2022": 0.6090225563909775, + "acc,exam_id__2017": 0.6637931034482759, + "acc,exam_id__2013": 0.6944444444444444, + "acc,exam_id__2011": 0.6666666666666666, + "acc,exam_id__2014": 0.6146788990825688, + "acc,exam_id__2015": 0.6218487394957983, + "acc,exam_id__2016": 0.5950413223140496 + }, + "faquad_nli": { + "f1_macro,all": 0.7785648031031285, + "acc,all": 0.8446153846153847, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8195498766124012, + "acc,all": 0.8235714285714286 + }, + "oab_exams": { + "acc,all": 0.41594533029612757, + "acc,exam_id__2011-03": 0.3434343434343434, + "acc,exam_id__2015-16": 0.375, + "acc,exam_id__2016-21": 0.375, + "acc,exam_id__2011-05": 0.45, + "acc,exam_id__2014-14": 0.5, + "acc,exam_id__2010-02": 0.42, + "acc,exam_id__2016-19": 0.5, + "acc,exam_id__2013-10": 0.4125, + "acc,exam_id__2014-13": 0.3375, + "acc,exam_id__2012-09": 0.33766233766233766, + "acc,exam_id__2016-20a": 0.35, + "acc,exam_id__2012-06": 0.475, + "acc,exam_id__2013-11": 0.4625, + "acc,exam_id__2012-06a": 0.35, + "acc,exam_id__2015-18": 0.4125, + "acc,exam_id__2013-12": 0.4625, + "acc,exam_id__2018-25": 0.45, + "acc,exam_id__2017-24": 0.3625, + "acc,exam_id__2014-15": 0.47435897435897434, + "acc,exam_id__2010-01": 0.36470588235294116, + "acc,exam_id__2011-04": 0.4125, + "acc,exam_id__2015-17": 0.5128205128205128, + 
"acc,exam_id__2016-20": 0.3625, + "acc,exam_id__2017-23": 0.45, + "acc,exam_id__2012-07": 0.35, + "acc,exam_id__2012-08": 0.4125, + "acc,exam_id__2017-22": 0.5375, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7033809689787383, + "acc,all": 0.7555816686251469 + }, + "tweetsentbr": { + "f1_macro,all": 0.6580996872346897, + "acc,all": 0.7059701492537314, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "9b8a6b02683dc88777ead09c81baae2a06b14294", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 14483472384, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 1, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 1620.039188243527, 
- "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "9b8a6b02683dc88777ead09c81baae2a06b14294", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 14483472384, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 1, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=liminerity/M7-7b,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=liminerity/M7-7b,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "5a13f3e" + "git_hash": 
"5a13f3e" } \ No newline at end of file diff --git a/liminerity/M7-7b/results_2024-07-13T18-52-55.048115.json b/liminerity/M7-7b/results_2024-07-13T18-52-55.048115.json index b4c5c41cadd9f23e540584050bd4020e20ebb59b..a674cc4464edf7610c8ab25039334b65acdb5f44 100644 --- a/liminerity/M7-7b/results_2024-07-13T18-52-55.048115.json +++ b/liminerity/M7-7b/results_2024-07-13T18-52-55.048115.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6771740008418541, - "all_grouped_npm": 0.5236925639003447, + "all_grouped_average": 0.6954545477094843, + "all_grouped_npm": 0.5508957586438421, "all_grouped": { "enem_challenge": 0.6375087473757872, "bluex": 0.545201668984701, @@ -45,7 +45,7 @@ "faquad_nli": 0.7785648031031285, "hatebr_offensive": 0.8195498766124012, "portuguese_hate_speech": 0.7033809689787383, - "tweetsentbr": 0.4935747654260173 + "tweetsentbr": 0.6580996872346897 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6375087473757872, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7785648031031285, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8195498766124012, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7033809689787383, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4935747654260173 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6580996872346897 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6375087473757872, @@ -150,9 +150,9 @@ "main_score": 0.7033809689787383 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4935747654260173, + "f1_macro,all": 0.6580996872346897, "acc,all": 0.7059701492537314, - "main_score": 0.4935747654260173 + "main_score": 0.6580996872346897 } }, "config_tasks": { diff --git a/lrds-code/boana-7b-instruct/raw_2024-02-18T04-08-18.900803/results.json b/lrds-code/boana-7b-instruct/raw_2024-02-18T04-08-18.900803/results.json index 9fc1ddd5853fba0c62810d3b55b5b860501159ef..b9742e632de5962486d751d7a44f55888d5ef39f 100644 --- a/lrds-code/boana-7b-instruct/raw_2024-02-18T04-08-18.900803/results.json +++ b/lrds-code/boana-7b-instruct/raw_2024-02-18T04-08-18.900803/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.4883903675731297, - "acc,all": 0.5506535947712419, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.3755961900599195, - "mse,all": 1.5730269607843137, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.29207232267037553, - "acc,exam_id__UNICAMP_2020": 0.3090909090909091, - "acc,exam_id__UNICAMP_2021_2": 0.4117647058823529, - "acc,exam_id__USP_2018": 0.2222222222222222, - "acc,exam_id__UNICAMP_2023": 0.32558139534883723, - "acc,exam_id__USP_2024": 0.2926829268292683, - "acc,exam_id__UNICAMP_2021_1": 0.1956521739130435, - "acc,exam_id__USP_2020": 0.25, - "acc,exam_id__USP_2019": 0.3, - "acc,exam_id__USP_2023": 0.13636363636363635, - "acc,exam_id__UNICAMP_2019": 0.34, - "acc,exam_id__USP_2022": 0.14285714285714285, - "acc,exam_id__UNICAMP_2024": 0.4222222222222222, - "acc,exam_id__USP_2021": 0.28846153846153844, - "acc,exam_id__UNICAMP_2022": 0.38461538461538464, - "acc,exam_id__UNICAMP_2018": 0.37037037037037035, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.21623512946116166, - "acc,exam_id__2023": 0.21481481481481482, - "acc,exam_id__2010": 0.23076923076923078, - "acc,exam_id__2016_2": 0.21951219512195122, - "acc,exam_id__2011": 0.23076923076923078, - "acc,exam_id__2022": 0.21804511278195488, - "acc,exam_id__2015": 0.20168067226890757, - 
"acc,exam_id__2013": 0.23148148148148148, - "acc,exam_id__2014": 0.21100917431192662, - "acc,exam_id__2009": 0.14782608695652175, - "acc,exam_id__2016": 0.23140495867768596, - "acc,exam_id__2012": 0.25862068965517243, - "acc,exam_id__2017": 0.19827586206896552 - }, - "faquad_nli": { - "f1_macro,all": 0.4396551724137931, - "acc,all": 0.7846153846153846, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8499559054089365, - "acc,all": 0.85 - }, - "oab_exams": { - "acc,all": 0.2715261958997722, - "acc,exam_id__2016-20": 0.3625, - "acc,exam_id__2015-17": 0.21794871794871795, - "acc,exam_id__2014-13": 0.2125, - "acc,exam_id__2018-25": 0.2625, - "acc,exam_id__2017-22": 0.2375, - "acc,exam_id__2014-14": 0.2375, - "acc,exam_id__2013-11": 0.325, - "acc,exam_id__2015-18": 0.3, - "acc,exam_id__2011-05": 0.3375, - "acc,exam_id__2012-08": 0.225, - "acc,exam_id__2012-06a": 0.3125, - "acc,exam_id__2017-24": 0.2625, - "acc,exam_id__2012-07": 0.325, - "acc,exam_id__2016-20a": 0.2125, - "acc,exam_id__2016-21": 0.25, - "acc,exam_id__2016-19": 0.38461538461538464, - "acc,exam_id__2010-01": 0.27058823529411763, - "acc,exam_id__2011-04": 0.275, - "acc,exam_id__2012-06": 0.2375, - "acc,exam_id__2017-23": 0.3, - "acc,exam_id__2012-09": 0.2597402597402597, - "acc,exam_id__2013-12": 0.275, - "acc,exam_id__2013-10": 0.2375, - "acc,exam_id__2011-03": 0.21212121212121213, - "acc,exam_id__2015-16": 0.3, - "acc,exam_id__2010-02": 0.3, - "acc,exam_id__2014-15": 0.20512820512820512, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6742912511607251, - "acc,all": 0.7215041128084606 - }, - "tweetsentbr": { - "f1_macro,all": 0.4037922571146918, - "acc,all": 0.6228855721393035, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.4883903675731297, + "acc,all": 0.5506535947712419, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.3755961900599195, + "mse,all": 1.5730269607843137, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.29207232267037553, + "acc,exam_id__UNICAMP_2020": 0.3090909090909091, + "acc,exam_id__UNICAMP_2021_2": 0.4117647058823529, + "acc,exam_id__USP_2018": 0.2222222222222222, + "acc,exam_id__UNICAMP_2023": 0.32558139534883723, + "acc,exam_id__USP_2024": 0.2926829268292683, + "acc,exam_id__UNICAMP_2021_1": 0.1956521739130435, + "acc,exam_id__USP_2020": 0.25, + "acc,exam_id__USP_2019": 0.3, + "acc,exam_id__USP_2023": 0.13636363636363635, + "acc,exam_id__UNICAMP_2019": 0.34, + "acc,exam_id__USP_2022": 0.14285714285714285, + "acc,exam_id__UNICAMP_2024": 0.4222222222222222, + "acc,exam_id__USP_2021": 0.28846153846153844, + "acc,exam_id__UNICAMP_2022": 0.38461538461538464, + "acc,exam_id__UNICAMP_2018": 0.37037037037037035, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.21623512946116166, + "acc,exam_id__2023": 0.21481481481481482, + "acc,exam_id__2010": 0.23076923076923078, + "acc,exam_id__2016_2": 0.21951219512195122, + "acc,exam_id__2011": 0.23076923076923078, + "acc,exam_id__2022": 0.21804511278195488, + "acc,exam_id__2015": 0.20168067226890757, + "acc,exam_id__2013": 0.23148148148148148, + "acc,exam_id__2014": 0.21100917431192662, + "acc,exam_id__2009": 0.14782608695652175, + "acc,exam_id__2016": 0.23140495867768596, + "acc,exam_id__2012": 0.25862068965517243, + "acc,exam_id__2017": 0.19827586206896552 + }, + "faquad_nli": { + "f1_macro,all": 0.4396551724137931, + "acc,all": 0.7846153846153846, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8499559054089365, + "acc,all": 0.85 + }, + "oab_exams": { + "acc,all": 0.2715261958997722, + "acc,exam_id__2016-20": 0.3625, + "acc,exam_id__2015-17": 0.21794871794871795, + "acc,exam_id__2014-13": 0.2125, + "acc,exam_id__2018-25": 0.2625, + "acc,exam_id__2017-22": 0.2375, + "acc,exam_id__2014-14": 0.2375, + "acc,exam_id__2013-11": 0.325, + "acc,exam_id__2015-18": 0.3, + "acc,exam_id__2011-05": 0.3375, + "acc,exam_id__2012-08": 0.225, + "acc,exam_id__2012-06a": 0.3125, + "acc,exam_id__2017-24": 0.2625, + "acc,exam_id__2012-07": 0.325, + "acc,exam_id__2016-20a": 0.2125, + "acc,exam_id__2016-21": 0.25, + "acc,exam_id__2016-19": 0.38461538461538464, + "acc,exam_id__2010-01": 0.27058823529411763, + "acc,exam_id__2011-04": 0.275, + "acc,exam_id__2012-06": 0.2375, + "acc,exam_id__2017-23": 0.3, + "acc,exam_id__2012-09": 0.2597402597402597, + 
"acc,exam_id__2013-12": 0.275, + "acc,exam_id__2013-10": 0.2375, + "acc,exam_id__2011-03": 0.21212121212121213, + "acc,exam_id__2015-16": 0.3, + "acc,exam_id__2010-02": 0.3, + "acc,exam_id__2014-15": 0.20512820512820512, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6742912511607251, + "acc,all": 0.7215041128084606 + }, + "tweetsentbr": { + "f1_macro,all": 0.5383896761529224, + "acc,all": 0.6228855721393035, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"96039b02ee677c99abdc0dc56824fcd69cc1c229", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 13543948288, - "model_num_parameters": 6738415616, - "model_is_loaded_in_4bit": false, - "model_is_loaded_in_8bit": false, - "model_is_quantized": null, - "model_device": "cuda:1", - "batch_size": 8, - "max_length": 4096, - "max_ctx_length": 4064, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1426.9889705882354, - "min_seq_length": 1404, - "max_seq_length": 1493, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1665.9889705882354, - "min_seq_length": 1643, - "max_seq_length": 1732, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1710.7426981919332, - "min_seq_length": 1344, - "max_seq_length": 2470, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1589.9881035689293, - "min_seq_length": 1337, - "max_seq_length": 2629, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1649.1184615384616, - "min_seq_length": 1597, - "max_seq_length": 1756, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1486.9178571428572, - "min_seq_length": 1463, - "max_seq_length": 1733, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "96039b02ee677c99abdc0dc56824fcd69cc1c229", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 13543948288, + "model_num_parameters": 6738415616, + "model_is_loaded_in_4bit": false, + "model_is_loaded_in_8bit": false, + 
"model_is_quantized": null, + "model_device": "cuda:1", + "batch_size": 8, + "max_length": 4096, + "max_ctx_length": 4064, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1362.4145785876992, - "min_seq_length": 1107, - "max_seq_length": 1844, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1426.9889705882354, + "min_seq_length": 1404, + "max_seq_length": 1493, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1665.9889705882354, + "min_seq_length": 1643, + "max_seq_length": 1732, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1710.7426981919332, + "min_seq_length": 1344, + "max_seq_length": 2470, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1589.9881035689293, + "min_seq_length": 1337, + "max_seq_length": 2629, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1649.1184615384616, + "min_seq_length": 1597, + "max_seq_length": 1756, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1486.9178571428572, + "min_seq_length": 1463, + "max_seq_length": 1733, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1362.4145785876992, + "min_seq_length": 1107, + "max_seq_length": 1844, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1978.801410105758, + "min_seq_length": 1944, + "max_seq_length": 2022, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + 
"truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1745.6845771144278, + "min_seq_length": 1724, + "max_seq_length": 1863, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1978.801410105758, - "min_seq_length": 1944, - "max_seq_length": 2022, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=lrds-code/boana-7b-instruct,dtype=bfloat16,device=cuda:1,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1745.6845771144278, - "min_seq_length": 1724, - "max_seq_length": 1863, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=lrds-code/boana-7b-instruct,dtype=bfloat16,device=cuda:1,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "804df15" + "git_hash": "804df15" } \ No newline at end of file diff --git a/lrds-code/boana-7b-instruct/results_2024-02-18T04-08-18.900803.json b/lrds-code/boana-7b-instruct/results_2024-02-18T04-08-18.900803.json index 09d004309bdea97c96fa05a21428a5811b7216f8..adf594735cf9b39343bf0470e807ebf7aa627ede 100644 --- a/lrds-code/boana-7b-instruct/results_2024-02-18T04-08-18.900803.json +++ b/lrds-code/boana-7b-instruct/results_2024-02-18T04-08-18.900803.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.4457238657513895, - "all_grouped_npm": 0.18282320960894285, + "all_grouped_average": 0.46067913453341514, + "all_grouped_npm": 0.20507807386790955, "all_grouped": { "enem_challenge": 0.21623512946116166, "bluex": 0.29207232267037553, @@ -45,7 +45,7 @@ "faquad_nli": 0.4396551724137931, "hatebr_offensive": 0.8499559054089365, "portuguese_hate_speech": 0.6742912511607251, - "tweetsentbr": 0.4037922571146918 + "tweetsentbr": 0.5383896761529224 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.21623512946116166, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.4396551724137931, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8499559054089365, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6742912511607251, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4037922571146918 + "harness|tweetsentbr|tweetsentbr|None|25": 0.5383896761529224 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.21623512946116166, @@ -150,9 +150,9 @@ "main_score": 0.6742912511607251 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 
0.4037922571146918, + "f1_macro,all": 0.5383896761529224, "acc,all": 0.6228855721393035, - "main_score": 0.4037922571146918 + "main_score": 0.5383896761529224 } }, "config_tasks": { diff --git a/lrds-code/samba-1.1B/raw_2024-02-18T01-47-54.897005/results.json b/lrds-code/samba-1.1B/raw_2024-02-18T01-47-54.897005/results.json index e62eb992f319bbe0739ca3522c457b3c3cdfa50e..f89be219ce6d590f16698e8c381c3f7433179b76 100644 --- a/lrds-code/samba-1.1B/raw_2024-02-18T01-47-54.897005/results.json +++ b/lrds-code/samba-1.1B/raw_2024-02-18T01-47-54.897005/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.3333333333333333, - "acc,all": 0.5, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.013001228192609664, - "mse,all": 5.312676674836601, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.08066759388038942, - "acc,exam_id__UNICAMP_2020": 0.05454545454545454, - "acc,exam_id__UNICAMP_2021_2": 0.0196078431372549, - "acc,exam_id__USP_2018": 0.05555555555555555, - "acc,exam_id__UNICAMP_2023": 0.18604651162790697, - "acc,exam_id__USP_2024": 0.04878048780487805, - "acc,exam_id__UNICAMP_2021_1": 0.21739130434782608, - "acc,exam_id__USP_2020": 0.08928571428571429, - "acc,exam_id__USP_2019": 0.15, - "acc,exam_id__USP_2023": 0.022727272727272728, - "acc,exam_id__UNICAMP_2019": 0.04, - "acc,exam_id__USP_2022": 0.08163265306122448, - "acc,exam_id__UNICAMP_2024": 0.08888888888888889, - "acc,exam_id__USP_2021": 0.038461538461538464, - "acc,exam_id__UNICAMP_2022": 0.10256410256410256, - "acc,exam_id__UNICAMP_2018": 0.05555555555555555, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.10216934919524143, - "acc,exam_id__2023": 0.1259259259259259, - "acc,exam_id__2010": 0.11965811965811966, - "acc,exam_id__2016_2": 0.07317073170731707, - "acc,exam_id__2011": 0.08547008547008547, - "acc,exam_id__2022": 0.08270676691729323, - "acc,exam_id__2015": 0.09243697478991597, - "acc,exam_id__2013": 0.1111111111111111, - "acc,exam_id__2014": 0.11926605504587157, - "acc,exam_id__2009": 0.11304347826086956, - "acc,exam_id__2016": 0.08264462809917356, - "acc,exam_id__2012": 0.09482758620689655, - "acc,exam_id__2017": 0.12931034482758622 - }, - "faquad_nli": { - "f1_macro,all": 0.17721518987341772, - "acc,all": 0.2153846153846154, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.35786694870727126, - "acc,all": 0.5364285714285715 - }, - "oab_exams": { - "acc,all": 0.15034168564920272, - "acc,exam_id__2016-20": 0.15, - "acc,exam_id__2015-17": 0.16666666666666666, - "acc,exam_id__2014-13": 0.15, - "acc,exam_id__2018-25": 0.1375, - "acc,exam_id__2017-22": 0.125, - "acc,exam_id__2014-14": 0.1875, - "acc,exam_id__2013-11": 0.1, - "acc,exam_id__2015-18": 0.1375, - "acc,exam_id__2011-05": 0.1625, - "acc,exam_id__2012-08": 0.2, - "acc,exam_id__2012-06a": 0.175, - "acc,exam_id__2017-24": 0.1125, - "acc,exam_id__2012-07": 0.1, - "acc,exam_id__2016-20a": 0.1875, - "acc,exam_id__2016-21": 0.1, - "acc,exam_id__2016-19": 0.10256410256410256, - "acc,exam_id__2010-01": 0.15294117647058825, - "acc,exam_id__2011-04": 0.175, - "acc,exam_id__2012-06": 0.2, - "acc,exam_id__2017-23": 0.1375, - "acc,exam_id__2012-09": 0.15584415584415584, - "acc,exam_id__2013-12": 0.1125, - "acc,exam_id__2013-10": 0.1625, - "acc,exam_id__2011-03": 0.21212121212121213, - "acc,exam_id__2015-16": 0.1125, - "acc,exam_id__2010-02": 0.17, - "acc,exam_id__2014-15": 0.15384615384615385, - "alias": "oab_exams" - }, - 
"portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.27257875968477685, - "acc,all": 0.6603995299647474 - }, - "tweetsentbr": { - "f1_macro,all": 0.03273007518573507, - "acc,all": 0.02437810945273632, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.3333333333333333, + "acc,all": 0.5, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.013001228192609664, + "mse,all": 5.312676674836601, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.08066759388038942, + "acc,exam_id__UNICAMP_2020": 0.05454545454545454, + "acc,exam_id__UNICAMP_2021_2": 0.0196078431372549, + "acc,exam_id__USP_2018": 0.05555555555555555, + "acc,exam_id__UNICAMP_2023": 0.18604651162790697, + "acc,exam_id__USP_2024": 0.04878048780487805, + "acc,exam_id__UNICAMP_2021_1": 0.21739130434782608, + "acc,exam_id__USP_2020": 0.08928571428571429, + "acc,exam_id__USP_2019": 0.15, + "acc,exam_id__USP_2023": 0.022727272727272728, + "acc,exam_id__UNICAMP_2019": 0.04, + "acc,exam_id__USP_2022": 0.08163265306122448, + "acc,exam_id__UNICAMP_2024": 0.08888888888888889, + "acc,exam_id__USP_2021": 0.038461538461538464, + "acc,exam_id__UNICAMP_2022": 0.10256410256410256, + "acc,exam_id__UNICAMP_2018": 0.05555555555555555, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.10216934919524143, + "acc,exam_id__2023": 0.1259259259259259, + "acc,exam_id__2010": 0.11965811965811966, + "acc,exam_id__2016_2": 0.07317073170731707, + "acc,exam_id__2011": 0.08547008547008547, + "acc,exam_id__2022": 0.08270676691729323, + "acc,exam_id__2015": 0.09243697478991597, + "acc,exam_id__2013": 0.1111111111111111, + "acc,exam_id__2014": 0.11926605504587157, + "acc,exam_id__2009": 0.11304347826086956, + "acc,exam_id__2016": 0.08264462809917356, + "acc,exam_id__2012": 0.09482758620689655, + "acc,exam_id__2017": 0.12931034482758622 + }, + "faquad_nli": { + "f1_macro,all": 0.17721518987341772, + "acc,all": 0.2153846153846154, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.5368004230609069, + "acc,all": 0.5364285714285715 + }, + "oab_exams": { + "acc,all": 0.15034168564920272, + "acc,exam_id__2016-20": 
0.15, + "acc,exam_id__2015-17": 0.16666666666666666, + "acc,exam_id__2014-13": 0.15, + "acc,exam_id__2018-25": 0.1375, + "acc,exam_id__2017-22": 0.125, + "acc,exam_id__2014-14": 0.1875, + "acc,exam_id__2013-11": 0.1, + "acc,exam_id__2015-18": 0.1375, + "acc,exam_id__2011-05": 0.1625, + "acc,exam_id__2012-08": 0.2, + "acc,exam_id__2012-06a": 0.175, + "acc,exam_id__2017-24": 0.1125, + "acc,exam_id__2012-07": 0.1, + "acc,exam_id__2016-20a": 0.1875, + "acc,exam_id__2016-21": 0.1, + "acc,exam_id__2016-19": 0.10256410256410256, + "acc,exam_id__2010-01": 0.15294117647058825, + "acc,exam_id__2011-04": 0.175, + "acc,exam_id__2012-06": 0.2, + "acc,exam_id__2017-23": 0.1375, + "acc,exam_id__2012-09": 0.15584415584415584, + "acc,exam_id__2013-12": 0.1125, + "acc,exam_id__2013-10": 0.1625, + "acc,exam_id__2011-03": 0.21212121212121213, + "acc,exam_id__2015-16": 0.1125, + "acc,exam_id__2010-02": 0.17, + "acc,exam_id__2014-15": 0.15384615384615385, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.40886813952716533, + "acc,all": 0.6603995299647474 + }, + "tweetsentbr": { + "f1_macro,all": 0.04364010024764676, + "acc,all": 0.02437810945273632, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 977, - "non_truncated": 13173, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 2180, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"4878a7614d32b8a051ed628c8986e29e9a8ee4a3", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 2211633920, - "model_num_parameters": 1100048384, - "model_is_loaded_in_4bit": false, - "model_is_loaded_in_8bit": false, - "model_is_quantized": null, - "model_device": "cuda:1", - "batch_size": 64, - "max_length": 2048, - "max_ctx_length": 2016, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1580.9889705882354, - "min_seq_length": 1558, - "max_seq_length": 1647, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1789.9889705882354, - "min_seq_length": 1767, - "max_seq_length": 1856, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 67, - "non_truncated": 652, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 82, - "mean_seq_length": 1741.7426981919332, - "min_seq_length": 1375, - "max_seq_length": 2501, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.885952712100139 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 25, - "non_truncated": 1404, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 27, - "mean_seq_length": 1620.9881035689293, - "min_seq_length": 1368, - "max_seq_length": 2660, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9811056682995103 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1788.1184615384616, - "min_seq_length": 1736, - "max_seq_length": 1895, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1715.9178571428572, - "min_seq_length": 1692, - "max_seq_length": 1962, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 977, + "non_truncated": 13173, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 2180, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "4878a7614d32b8a051ed628c8986e29e9a8ee4a3", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 2211633920, + "model_num_parameters": 1100048384, + "model_is_loaded_in_4bit": false, + 
"model_is_loaded_in_8bit": false, + "model_is_quantized": null, + "model_device": "cuda:1", + "batch_size": 64, + "max_length": 2048, + "max_ctx_length": 2016, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1393.4145785876992, - "min_seq_length": 1138, - "max_seq_length": 1875, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1580.9889705882354, + "min_seq_length": 1558, + "max_seq_length": 1647, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1789.9889705882354, + "min_seq_length": 1767, + "max_seq_length": 1856, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 67, + "non_truncated": 652, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 82, + "mean_seq_length": 1741.7426981919332, + "min_seq_length": 1375, + "max_seq_length": 2501, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.885952712100139 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 25, + "non_truncated": 1404, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 27, + "mean_seq_length": 1620.9881035689293, + "min_seq_length": 1368, + "max_seq_length": 2660, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9811056682995103 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1788.1184615384616, + "min_seq_length": 1736, + "max_seq_length": 1895, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1715.9178571428572, + "min_seq_length": 1692, + "max_seq_length": 1962, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1393.4145785876992, + "min_seq_length": 1138, + "max_seq_length": 1875, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 851, + "non_truncated": 0, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 2036, + "mean_seq_length": 2207.801410105758, + "min_seq_length": 2173, + "max_seq_length": 2251, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + 
"mean_effective_fewshot_size": 22.607520564042304 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 34, + "non_truncated": 1976, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 35, + "mean_seq_length": 1982.6845771144278, + "min_seq_length": 1961, + "max_seq_length": 2100, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 24.98258706467662 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 851, - "non_truncated": 0, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 2036, - "mean_seq_length": 2207.801410105758, - "min_seq_length": 2173, - "max_seq_length": 2251, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 22.607520564042304 + "config": { + "model": "huggingface", + "model_args": "pretrained=lrds-code/samba-1.1B,dtype=bfloat16,device=cuda:1,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 34, - "non_truncated": 1976, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 35, - "mean_seq_length": 1982.6845771144278, - "min_seq_length": 1961, - "max_seq_length": 2100, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 24.98258706467662 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=lrds-code/samba-1.1B,dtype=bfloat16,device=cuda:1,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "804df15" + "git_hash": "804df15" } \ No newline at end of file diff --git a/lrds-code/samba-1.1B/results_2024-02-18T01-47-54.897005.json b/lrds-code/samba-1.1B/results_2024-02-18T01-47-54.897005.json index 64dd99c7231ba44a40f70a435397eba81b0fd7c5..68c07758093336bfaaed8891e59f5c837922afbd 100644 --- a/lrds-code/samba-1.1B/results_2024-02-18T01-47-54.897005.json +++ b/lrds-code/samba-1.1B/results_2024-02-18T01-47-54.897005.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.16887824041133082, - "all_grouped_npm": -0.26600705330094676, + "all_grouped_average": 0.20511522699554594, + "all_grouped_npm": -0.195374386085522, "all_grouped": { "enem_challenge": 0.10216934919524143, "bluex": 0.08066759388038942, @@ -43,9 +43,9 @@ "assin2_rte": 0.3333333333333333, "assin2_sts": 0.013001228192609664, "faquad_nli": 0.17721518987341772, - "hatebr_offensive": 0.35786694870727126, - "portuguese_hate_speech": 0.27257875968477685, - "tweetsentbr": 0.03273007518573507 + "hatebr_offensive": 0.5368004230609069, + "portuguese_hate_speech": 0.40886813952716533, + "tweetsentbr": 0.04364010024764676 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.10216934919524143, @@ -54,9 +54,9 @@ "harness|assin2_rte|assin2_rte|None|15": 0.3333333333333333, "harness|assin2_sts|assin2_sts|None|15": 0.013001228192609664, "harness|faquad_nli|faquad_nli|None|15": 0.17721518987341772, - "harness|hatebr_offensive|hatebr_offensive|None|25": 0.35786694870727126, - 
"harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.27257875968477685, - "harness|tweetsentbr|tweetsentbr|None|25": 0.03273007518573507 + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.5368004230609069, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.40886813952716533, + "harness|tweetsentbr|tweetsentbr|None|25": 0.04364010024764676 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.10216934919524143, @@ -140,19 +140,19 @@ "main_score": 0.17721518987341772 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.35786694870727126, + "f1_macro,all": 0.5368004230609069, "acc,all": 0.5364285714285715, - "main_score": 0.35786694870727126 + "main_score": 0.5368004230609069 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.27257875968477685, + "f1_macro,all": 0.40886813952716533, "acc,all": 0.6603995299647474, - "main_score": 0.27257875968477685 + "main_score": 0.40886813952716533 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.03273007518573507, + "f1_macro,all": 0.04364010024764676, "acc,all": 0.02437810945273632, - "main_score": 0.03273007518573507 + "main_score": 0.04364010024764676 } }, "config_tasks": { diff --git a/lucianosb/boto-27B/raw_2024-07-13T01-31-41.225952/results.json b/lucianosb/boto-27B/raw_2024-07-13T01-31-41.225952/results.json index 6a5fcb7aa926cefe68a81d6c2a87d62686098abc..a9ad89921602007fe246e42c3c9c0c494af25eee 100644 --- a/lucianosb/boto-27B/raw_2024-07-13T01-31-41.225952/results.json +++ b/lucianosb/boto-27B/raw_2024-07-13T01-31-41.225952/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.3433766760091345, - "acc,all": 0.5437091503267973, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.18721992880494653, - "mse,all": 2.3475385946696012, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.23226703755215578, - "acc,exam_id__USP_2021": 0.15384615384615385, - "acc,exam_id__USP_2019": 0.3, - "acc,exam_id__USP_2023": 0.20454545454545456, - "acc,exam_id__USP_2024": 0.17073170731707318, - "acc,exam_id__UNICAMP_2020": 0.2909090909090909, - "acc,exam_id__UNICAMP_2021_2": 0.21568627450980393, - "acc,exam_id__USP_2020": 0.23214285714285715, - "acc,exam_id__UNICAMP_2022": 0.28205128205128205, - "acc,exam_id__UNICAMP_2023": 0.27906976744186046, - "acc,exam_id__UNICAMP_2019": 0.18, - "acc,exam_id__UNICAMP_2024": 0.2222222222222222, - "acc,exam_id__UNICAMP_2021_1": 0.30434782608695654, - "acc,exam_id__USP_2022": 0.16326530612244897, - "acc,exam_id__USP_2018": 0.24074074074074073, - "acc,exam_id__UNICAMP_2018": 0.25925925925925924, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.21553533939818054, - "acc,exam_id__2011": 0.2564102564102564, - "acc,exam_id__2016": 0.18181818181818182, - "acc,exam_id__2017": 0.1810344827586207, - "acc,exam_id__2022": 0.21804511278195488, - "acc,exam_id__2012": 0.22413793103448276, - "acc,exam_id__2015": 0.20168067226890757, - "acc,exam_id__2013": 0.21296296296296297, - "acc,exam_id__2023": 0.22962962962962963, - "acc,exam_id__2014": 0.22935779816513763, - "acc,exam_id__2009": 0.24347826086956523, - "acc,exam_id__2016_2": 0.21138211382113822, - "acc,exam_id__2010": 0.19658119658119658 - }, - "faquad_nli": { - "f1_macro,all": 0.33430422904107115, - "acc,all": 0.6738461538461539, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.3611429598640967, - "acc,all": 
0.5357142857142857 - }, - "oab_exams": { - "acc,all": 0.26514806378132116, - "acc,exam_id__2010-01": 0.2, - "acc,exam_id__2013-10": 0.2875, - "acc,exam_id__2017-23": 0.275, - "acc,exam_id__2014-14": 0.2375, - "acc,exam_id__2012-06": 0.25, - "acc,exam_id__2011-05": 0.2875, - "acc,exam_id__2012-07": 0.3, - "acc,exam_id__2012-09": 0.2077922077922078, - "acc,exam_id__2013-12": 0.2125, - "acc,exam_id__2014-15": 0.21794871794871795, - "acc,exam_id__2015-18": 0.225, - "acc,exam_id__2011-04": 0.225, - "acc,exam_id__2016-21": 0.275, - "acc,exam_id__2015-16": 0.275, - "acc,exam_id__2016-20a": 0.1625, - "acc,exam_id__2016-19": 0.21794871794871795, - "acc,exam_id__2012-06a": 0.3125, - "acc,exam_id__2014-13": 0.4, - "acc,exam_id__2018-25": 0.3, - "acc,exam_id__2015-17": 0.41025641025641024, - "acc,exam_id__2013-11": 0.275, - "acc,exam_id__2011-03": 0.2828282828282828, - "acc,exam_id__2016-20": 0.2875, - "acc,exam_id__2010-02": 0.25, - "acc,exam_id__2012-08": 0.3125, - "acc,exam_id__2017-24": 0.2125, - "acc,exam_id__2017-22": 0.2625, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.3085809142505695, - "acc,all": 0.5276145710928319 - }, - "tweetsentbr": { - "f1_macro,all": 0.22435015163747685, - "acc,all": 0.2462686567164179, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.5150650140137017, + "acc,all": 0.5437091503267973, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.18721992880494653, + "mse,all": 2.3475385946696012, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.23226703755215578, + "acc,exam_id__USP_2021": 0.15384615384615385, + "acc,exam_id__USP_2019": 0.3, + "acc,exam_id__USP_2023": 0.20454545454545456, + "acc,exam_id__USP_2024": 0.17073170731707318, + "acc,exam_id__UNICAMP_2020": 0.2909090909090909, + "acc,exam_id__UNICAMP_2021_2": 0.21568627450980393, + "acc,exam_id__USP_2020": 0.23214285714285715, + "acc,exam_id__UNICAMP_2022": 0.28205128205128205, + "acc,exam_id__UNICAMP_2023": 0.27906976744186046, + "acc,exam_id__UNICAMP_2019": 0.18, + "acc,exam_id__UNICAMP_2024": 0.2222222222222222, + "acc,exam_id__UNICAMP_2021_1": 0.30434782608695654, + "acc,exam_id__USP_2022": 0.16326530612244897, + "acc,exam_id__USP_2018": 0.24074074074074073, + "acc,exam_id__UNICAMP_2018": 0.25925925925925924, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.21553533939818054, + "acc,exam_id__2011": 0.2564102564102564, + "acc,exam_id__2016": 0.18181818181818182, + "acc,exam_id__2017": 0.1810344827586207, + "acc,exam_id__2022": 0.21804511278195488, + "acc,exam_id__2012": 0.22413793103448276, + "acc,exam_id__2015": 0.20168067226890757, + "acc,exam_id__2013": 0.21296296296296297, + "acc,exam_id__2023": 0.22962962962962963, + "acc,exam_id__2014": 0.22935779816513763, + "acc,exam_id__2009": 0.24347826086956523, + "acc,exam_id__2016_2": 0.21138211382113822, + "acc,exam_id__2010": 0.19658119658119658 + }, + "faquad_nli": { + "f1_macro,all": 0.5014563435616067, + "acc,all": 0.6738461538461539, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.5417144397961452, + "acc,all": 0.5357142857142857 + }, + "oab_exams": { + "acc,all": 0.26514806378132116, + "acc,exam_id__2010-01": 0.2, + "acc,exam_id__2013-10": 0.2875, + "acc,exam_id__2017-23": 0.275, + "acc,exam_id__2014-14": 0.2375, + "acc,exam_id__2012-06": 0.25, + "acc,exam_id__2011-05": 0.2875, + "acc,exam_id__2012-07": 0.3, + "acc,exam_id__2012-09": 0.2077922077922078, + "acc,exam_id__2013-12": 0.2125, + "acc,exam_id__2014-15": 0.21794871794871795, + "acc,exam_id__2015-18": 0.225, + "acc,exam_id__2011-04": 0.225, + "acc,exam_id__2016-21": 0.275, + "acc,exam_id__2015-16": 0.275, + "acc,exam_id__2016-20a": 0.1625, + "acc,exam_id__2016-19": 0.21794871794871795, + "acc,exam_id__2012-06a": 0.3125, + "acc,exam_id__2014-13": 0.4, + "acc,exam_id__2018-25": 0.3, + "acc,exam_id__2015-17": 0.41025641025641024, + "acc,exam_id__2013-11": 
0.275, + "acc,exam_id__2011-03": 0.2828282828282828, + "acc,exam_id__2016-20": 0.2875, + "acc,exam_id__2010-02": 0.25, + "acc,exam_id__2012-08": 0.3125, + "acc,exam_id__2017-24": 0.2125, + "acc,exam_id__2017-22": 0.2625, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.4628713713758542, + "acc,all": 0.5276145710928319 + }, + "tweetsentbr": { + "f1_macro,all": 0.2991335355166358, + "acc,all": 0.2462686567164179, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "e8b420cb2d560073b5af3d6119acc3946c99b83c", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 54454268416, - "model_num_parameters": 27227128320, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 2, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1008.8839869281046, - "min_seq_length": 994, - "max_seq_length": 1051, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1217.8839869281046, - "min_seq_length": 1203, - "max_seq_length": 1260, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1373.4464534075105, - "min_seq_length": 1080, - "max_seq_length": 1993, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1215.8278516445066, 
- "min_seq_length": 1019, - "max_seq_length": 2062, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1094.1338461538462, - "min_seq_length": 1056, - "max_seq_length": 1167, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "e8b420cb2d560073b5af3d6119acc3946c99b83c", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 54454268416, + "model_num_parameters": 27227128320, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 2, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 896.505, - "min_seq_length": 881, - "max_seq_length": 1085, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 987.4004555808656, - "min_seq_length": 791, - "max_seq_length": 1322, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1008.8839869281046, + "min_seq_length": 994, + "max_seq_length": 1051, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1217.8839869281046, + "min_seq_length": 1203, + "max_seq_length": 1260, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1373.4464534075105, + "min_seq_length": 1080, + "max_seq_length": 1993, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1215.8278516445066, + "min_seq_length": 1019, + "max_seq_length": 2062, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + 
"mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1094.1338461538462, + "min_seq_length": 1056, + "max_seq_length": 1167, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 896.505, + "min_seq_length": 881, + "max_seq_length": 1085, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 987.4004555808656, + "min_seq_length": 791, + "max_seq_length": 1322, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1272.13866039953, + "min_seq_length": 1244, + "max_seq_length": 1303, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1101.9791044776118, + "min_seq_length": 1087, + "max_seq_length": 1144, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1272.13866039953, - "min_seq_length": 1244, - "max_seq_length": 1303, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=lucianosb/boto-27B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1101.9791044776118, - "min_seq_length": 1087, - "max_seq_length": 1144, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=lucianosb/boto-27B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git 
a/lucianosb/boto-27B/results_2024-07-13T01-31-41.225952.json b/lucianosb/boto-27B/results_2024-07-13T01-31-41.225952.json index bcc40ba561d444b150bb2edf83f3acdb165e197a..3fbe5c61dc0077c160ed98cc80d5b19c4158c469 100644 --- a/lucianosb/boto-27B/results_2024-07-13T01-31-41.225952.json +++ b/lucianosb/boto-27B/results_2024-07-13T01-31-41.225952.json @@ -34,29 +34,29 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.2746583667043281, - "all_grouped_npm": -0.11775485154717903, + "all_grouped_average": 0.3578234526445053, + "all_grouped_npm": 0.03993539974829011, "all_grouped": { "enem_challenge": 0.21553533939818054, "bluex": 0.23226703755215578, "oab_exams": 0.26514806378132116, - "assin2_rte": 0.3433766760091345, + "assin2_rte": 0.5150650140137017, "assin2_sts": 0.18721992880494653, - "faquad_nli": 0.33430422904107115, - "hatebr_offensive": 0.3611429598640967, - "portuguese_hate_speech": 0.3085809142505695, - "tweetsentbr": 0.22435015163747685 + "faquad_nli": 0.5014563435616067, + "hatebr_offensive": 0.5417144397961452, + "portuguese_hate_speech": 0.4628713713758542, + "tweetsentbr": 0.2991335355166358 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.21553533939818054, "harness|bluex|bluex|None|3": 0.23226703755215578, "harness|oab_exams|oab_exams|None|3": 0.26514806378132116, - "harness|assin2_rte|assin2_rte|None|15": 0.3433766760091345, + "harness|assin2_rte|assin2_rte|None|15": 0.5150650140137017, "harness|assin2_sts|assin2_sts|None|15": 0.18721992880494653, - "harness|faquad_nli|faquad_nli|None|15": 0.33430422904107115, - "harness|hatebr_offensive|hatebr_offensive|None|25": 0.3611429598640967, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.3085809142505695, - "harness|tweetsentbr|tweetsentbr|None|25": 0.22435015163747685 + "harness|faquad_nli|faquad_nli|None|15": 0.5014563435616067, + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.5417144397961452, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.4628713713758542, + "harness|tweetsentbr|tweetsentbr|None|25": 0.2991335355166358 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.21553533939818054, @@ -125,9 +125,9 @@ "main_score": 0.26514806378132116 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.3433766760091345, + "f1_macro,all": 0.5150650140137017, "acc,all": 0.5437091503267973, - "main_score": 0.3433766760091345 + "main_score": 0.5150650140137017 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.18721992880494653, @@ -135,24 +135,24 @@ "main_score": 0.18721992880494653 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.33430422904107115, + "f1_macro,all": 0.5014563435616067, "acc,all": 0.6738461538461539, - "main_score": 0.33430422904107115 + "main_score": 0.5014563435616067 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.3611429598640967, + "f1_macro,all": 0.5417144397961452, "acc,all": 0.5357142857142857, - "main_score": 0.3611429598640967 + "main_score": 0.5417144397961452 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.3085809142505695, + "f1_macro,all": 0.4628713713758542, "acc,all": 0.5276145710928319, - "main_score": 0.3085809142505695 + "main_score": 0.4628713713758542 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.22435015163747685, + "f1_macro,all": 0.2991335355166358, "acc,all": 0.2462686567164179, - "main_score": 0.22435015163747685 + "main_score": 0.2991335355166358 } }, "config_tasks": { 
diff --git a/matheusrdgsf/cesar-ptbr/raw_2024-05-27T22-31-08.120812/results.json b/matheusrdgsf/cesar-ptbr/raw_2024-05-27T22-31-08.120812/results.json index e41c816aa66f43f8f4d575fce1c5367fba8fa354..64b511f19f17ce1434ac459a9297f780c6b8f577 100644 --- a/matheusrdgsf/cesar-ptbr/raw_2024-05-27T22-31-08.120812/results.json +++ b/matheusrdgsf/cesar-ptbr/raw_2024-05-27T22-31-08.120812/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.5831734998147651, - "acc,all": 0.875, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.6849438767598355, - "mse,all": 0.6465727124183006, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.46870653685674546, - "acc,exam_id__UNICAMP_2021_2": 0.4117647058823529, - "acc,exam_id__UNICAMP_2023": 0.4418604651162791, - "acc,exam_id__USP_2018": 0.3888888888888889, - "acc,exam_id__USP_2019": 0.45, - "acc,exam_id__USP_2023": 0.5454545454545454, - "acc,exam_id__USP_2022": 0.5306122448979592, - "acc,exam_id__UNICAMP_2019": 0.46, - "acc,exam_id__UNICAMP_2022": 0.5128205128205128, - "acc,exam_id__USP_2020": 0.48214285714285715, - "acc,exam_id__USP_2024": 0.6585365853658537, - "acc,exam_id__UNICAMP_2024": 0.5111111111111111, - "acc,exam_id__USP_2021": 0.3076923076923077, - "acc,exam_id__UNICAMP_2021_1": 0.5, - "acc,exam_id__UNICAMP_2020": 0.4727272727272727, - "acc,exam_id__UNICAMP_2018": 0.42592592592592593, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.5374387683694891, - "acc,exam_id__2013": 0.5833333333333334, - "acc,exam_id__2011": 0.6239316239316239, - "acc,exam_id__2010": 0.5555555555555556, - "acc,exam_id__2012": 0.5603448275862069, - "acc,exam_id__2017": 0.5172413793103449, - "acc,exam_id__2009": 0.5565217391304348, - "acc,exam_id__2016": 0.4628099173553719, - "acc,exam_id__2014": 0.5688073394495413, - "acc,exam_id__2022": 0.48120300751879697, - "acc,exam_id__2016_2": 0.5609756097560976, - "acc,exam_id__2015": 0.5042016806722689, - "acc,exam_id__2023": 0.4962962962962963 - }, - "faquad_nli": { - "f1_macro,all": 0.7380971253434792, - "acc,all": 0.8123076923076923, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8330124462550406, - "acc,all": 0.8335714285714285 - }, - "oab_exams": { - "acc,all": 0.3826879271070615, - "acc,exam_id__2012-08": 0.3625, - "acc,exam_id__2013-11": 0.3875, - "acc,exam_id__2013-10": 0.375, - "acc,exam_id__2010-02": 0.41, - "acc,exam_id__2015-18": 0.4, - "acc,exam_id__2016-19": 0.5128205128205128, - "acc,exam_id__2010-01": 0.3411764705882353, - "acc,exam_id__2011-03": 0.36363636363636365, - "acc,exam_id__2012-09": 0.37662337662337664, - "acc,exam_id__2017-23": 0.4375, - "acc,exam_id__2018-25": 0.375, - "acc,exam_id__2016-20": 0.375, - "acc,exam_id__2014-15": 0.5384615384615384, - "acc,exam_id__2011-05": 0.3875, - "acc,exam_id__2017-24": 0.325, - "acc,exam_id__2011-04": 0.3375, - "acc,exam_id__2015-17": 0.4358974358974359, - "acc,exam_id__2017-22": 0.425, - "acc,exam_id__2013-12": 0.4125, - "acc,exam_id__2012-06a": 0.3625, - "acc,exam_id__2014-14": 0.3875, - "acc,exam_id__2014-13": 0.35, - "acc,exam_id__2015-16": 0.3625, - "acc,exam_id__2016-21": 0.35, - "acc,exam_id__2016-20a": 0.2625, - "acc,exam_id__2012-07": 0.3, - "acc,exam_id__2012-06": 0.3875, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6748662787280315, - "acc,all": 0.6944770857814336 - }, - "tweetsentbr": { - "f1_macro,all": 0.4270736435976256, - "acc,all": 
0.6477611940298508, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.8747602497221476, + "acc,all": 0.875, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.6849438767598355, + "mse,all": 0.6465727124183006, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.46870653685674546, + "acc,exam_id__UNICAMP_2021_2": 0.4117647058823529, + "acc,exam_id__UNICAMP_2023": 0.4418604651162791, + "acc,exam_id__USP_2018": 0.3888888888888889, + "acc,exam_id__USP_2019": 0.45, + "acc,exam_id__USP_2023": 0.5454545454545454, + "acc,exam_id__USP_2022": 0.5306122448979592, + "acc,exam_id__UNICAMP_2019": 0.46, + "acc,exam_id__UNICAMP_2022": 0.5128205128205128, + "acc,exam_id__USP_2020": 0.48214285714285715, + "acc,exam_id__USP_2024": 0.6585365853658537, + "acc,exam_id__UNICAMP_2024": 0.5111111111111111, + "acc,exam_id__USP_2021": 0.3076923076923077, + "acc,exam_id__UNICAMP_2021_1": 0.5, + "acc,exam_id__UNICAMP_2020": 0.4727272727272727, + "acc,exam_id__UNICAMP_2018": 0.42592592592592593, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.5374387683694891, + "acc,exam_id__2013": 0.5833333333333334, + "acc,exam_id__2011": 0.6239316239316239, + "acc,exam_id__2010": 0.5555555555555556, + "acc,exam_id__2012": 0.5603448275862069, + "acc,exam_id__2017": 0.5172413793103449, + "acc,exam_id__2009": 0.5565217391304348, + "acc,exam_id__2016": 0.4628099173553719, + "acc,exam_id__2014": 0.5688073394495413, + "acc,exam_id__2022": 0.48120300751879697, + "acc,exam_id__2016_2": 0.5609756097560976, + "acc,exam_id__2015": 0.5042016806722689, + "acc,exam_id__2023": 0.4962962962962963 + }, + "faquad_nli": { + "f1_macro,all": 0.7380971253434792, + "acc,all": 0.8123076923076923, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8330124462550406, + "acc,all": 0.8335714285714285 + }, + "oab_exams": { + "acc,all": 0.3826879271070615, + "acc,exam_id__2012-08": 0.3625, + "acc,exam_id__2013-11": 0.3875, + "acc,exam_id__2013-10": 0.375, + "acc,exam_id__2010-02": 0.41, + "acc,exam_id__2015-18": 0.4, + "acc,exam_id__2016-19": 0.5128205128205128, + "acc,exam_id__2010-01": 0.3411764705882353, + "acc,exam_id__2011-03": 
0.36363636363636365, + "acc,exam_id__2012-09": 0.37662337662337664, + "acc,exam_id__2017-23": 0.4375, + "acc,exam_id__2018-25": 0.375, + "acc,exam_id__2016-20": 0.375, + "acc,exam_id__2014-15": 0.5384615384615384, + "acc,exam_id__2011-05": 0.3875, + "acc,exam_id__2017-24": 0.325, + "acc,exam_id__2011-04": 0.3375, + "acc,exam_id__2015-17": 0.4358974358974359, + "acc,exam_id__2017-22": 0.425, + "acc,exam_id__2013-12": 0.4125, + "acc,exam_id__2012-06a": 0.3625, + "acc,exam_id__2014-14": 0.3875, + "acc,exam_id__2014-13": 0.35, + "acc,exam_id__2015-16": 0.3625, + "acc,exam_id__2016-21": 0.35, + "acc,exam_id__2016-20a": 0.2625, + "acc,exam_id__2012-07": 0.3, + "acc,exam_id__2012-06": 0.3875, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6748662787280315, + "acc,all": 0.6944770857814336 + }, + "tweetsentbr": { + "f1_macro,all": 0.5694315247968341, + "acc,all": 0.6477611940298508, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": 
"find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - 
"regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. 
Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "8128029fba795c423004d08695fdda8491289748", - "model_dtype": "torch.float16", - "model_memory_footprint": 8212332544, - "model_num_parameters": 1143017472, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1646.7455065359477, - "min_seq_length": 1623, - "max_seq_length": 1713, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1855.7455065359477, - "min_seq_length": 1832, - "max_seq_length": 1922, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1792.9262865090404, - "min_seq_length": 1416, - "max_seq_length": 2593, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1693.039188243527, - "min_seq_length": 1427, - "max_seq_length": 2691, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1871.9876923076922, - "min_seq_length": 1816, - "max_seq_length": 1992, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "8128029fba795c423004d08695fdda8491289748", + "model_dtype": "torch.float16", + "model_memory_footprint": 8212332544, + "model_num_parameters": 1143017472, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1752.3878571428572, - "min_seq_length": 1729, - "max_seq_length": 2003, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1438.764464692483, - "min_seq_length": 1172, - "max_seq_length": 1941, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1646.7455065359477, + "min_seq_length": 1623, + "max_seq_length": 1713, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1855.7455065359477, + "min_seq_length": 1832, + "max_seq_length": 1922, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1792.9262865090404, + "min_seq_length": 1416, + "max_seq_length": 2593, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1693.039188243527, + "min_seq_length": 1427, + "max_seq_length": 
2691, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1871.9876923076922, + "min_seq_length": 1816, + "max_seq_length": 1992, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1752.3878571428572, + "min_seq_length": 1729, + "max_seq_length": 2003, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1438.764464692483, + "min_seq_length": 1172, + "max_seq_length": 1941, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2253.3360752056406, + "min_seq_length": 2218, + "max_seq_length": 2292, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1999.2492537313433, + "min_seq_length": 1978, + "max_seq_length": 2094, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2253.3360752056406, - "min_seq_length": 2218, - "max_seq_length": 2292, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=TheBloke/zephyr-7B-beta-GPTQ,peft=matheusrdgsf/cesar-ptbr,autogptq=True,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1999.2492537313433, - "min_seq_length": 1978, - "max_seq_length": 2094, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=TheBloke/zephyr-7B-beta-GPTQ,peft=matheusrdgsf/cesar-ptbr,autogptq=True,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - 
null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/matheusrdgsf/cesar-ptbr/results_2024-05-27T22-31-08.120812.json b/matheusrdgsf/cesar-ptbr/results_2024-05-27T22-31-08.120812.json index 280dcc2480b9c2e626352d94a32783692427cc36..f82d5daa9acfac518901cb8ba88605d502e4f034 100644 --- a/matheusrdgsf/cesar-ptbr/results_2024-05-27T22-31-08.120812.json +++ b/matheusrdgsf/cesar-ptbr/results_2024-05-27T22-31-08.120812.json @@ -34,29 +34,29 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.5922222336480082, - "all_grouped_npm": 0.3858251528786401, + "all_grouped_average": 0.6404383037709628, + "all_grouped_npm": 0.47416021787115503, "all_grouped": { "enem_challenge": 0.5374387683694891, "bluex": 0.46870653685674546, "oab_exams": 0.3826879271070615, - "assin2_rte": 0.5831734998147651, + "assin2_rte": 0.8747602497221476, "assin2_sts": 0.6849438767598355, "faquad_nli": 0.7380971253434792, "hatebr_offensive": 0.8330124462550406, "portuguese_hate_speech": 0.6748662787280315, - "tweetsentbr": 0.4270736435976256 + "tweetsentbr": 0.5694315247968341 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.5374387683694891, "harness|bluex|bluex|None|3": 0.46870653685674546, "harness|oab_exams|oab_exams|None|3": 0.3826879271070615, - "harness|assin2_rte|assin2_rte|None|15": 0.5831734998147651, + "harness|assin2_rte|assin2_rte|None|15": 0.8747602497221476, "harness|assin2_sts|assin2_sts|None|15": 0.6849438767598355, "harness|faquad_nli|faquad_nli|None|15": 0.7380971253434792, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8330124462550406, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6748662787280315, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4270736435976256 + "harness|tweetsentbr|tweetsentbr|None|25": 0.5694315247968341 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.5374387683694891, @@ -125,9 +125,9 @@ "main_score": 0.3826879271070615 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.5831734998147651, + "f1_macro,all": 0.8747602497221476, "acc,all": 0.875, - "main_score": 0.5831734998147651 + "main_score": 0.8747602497221476 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.6849438767598355, @@ -150,9 +150,9 @@ "main_score": 0.6748662787280315 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4270736435976256, + "f1_macro,all": 0.5694315247968341, "acc,all": 0.6477611940298508, - "main_score": 0.4270736435976256 + "main_score": 0.5694315247968341 } }, "config_tasks": { diff --git a/maywell/Synatra-7B-v0.3-RP/raw_2024-05-18T21-35-16.166862/results.json b/maywell/Synatra-7B-v0.3-RP/raw_2024-05-18T21-35-16.166862/results.json index 24b2415acae4dc48d49147aca0b8efeeeaafd965..a8e6f6e9609d20ac11de39f1357e333adab5643b 100644 --- a/maywell/Synatra-7B-v0.3-RP/raw_2024-05-18T21-35-16.166862/results.json +++ b/maywell/Synatra-7B-v0.3-RP/raw_2024-05-18T21-35-16.166862/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.8982291377095245, - "acc,all": 0.8982843137254902, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.6435083220682054, - "mse,all": 1.0219403594771241, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.46870653685674546, - "acc,exam_id__UNICAMP_2024": 0.5777777777777777, - "acc,exam_id__USP_2019": 0.35, - "acc,exam_id__UNICAMP_2019": 0.52, - "acc,exam_id__UNICAMP_2021_2": 0.47058823529411764, - 
"acc,exam_id__USP_2021": 0.38461538461538464, - "acc,exam_id__UNICAMP_2020": 0.4909090909090909, - "acc,exam_id__UNICAMP_2022": 0.5384615384615384, - "acc,exam_id__USP_2018": 0.42592592592592593, - "acc,exam_id__USP_2022": 0.42857142857142855, - "acc,exam_id__UNICAMP_2018": 0.4074074074074074, - "acc,exam_id__UNICAMP_2023": 0.5116279069767442, - "acc,exam_id__UNICAMP_2021_1": 0.34782608695652173, - "acc,exam_id__USP_2020": 0.4107142857142857, - "acc,exam_id__USP_2024": 0.6585365853658537, - "acc,exam_id__USP_2023": 0.5681818181818182, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.5801259622113366, - "acc,exam_id__2012": 0.5258620689655172, - "acc,exam_id__2017": 0.5775862068965517, - "acc,exam_id__2013": 0.6111111111111112, - "acc,exam_id__2016": 0.5702479338842975, - "acc,exam_id__2011": 0.6837606837606838, - "acc,exam_id__2015": 0.5630252100840336, - "acc,exam_id__2022": 0.5488721804511278, - "acc,exam_id__2014": 0.5779816513761468, - "acc,exam_id__2010": 0.5641025641025641, - "acc,exam_id__2009": 0.5130434782608696, - "acc,exam_id__2016_2": 0.5853658536585366, - "acc,exam_id__2023": 0.6370370370370371 - }, - "faquad_nli": { - "f1_macro,all": 0.2571759752087621, - "acc,all": 0.23076923076923078, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7439554115565462, - "acc,all": 0.7564285714285715 - }, - "oab_exams": { - "acc,all": 0.4054669703872437, - "acc,exam_id__2014-15": 0.48717948717948717, - "acc,exam_id__2012-07": 0.375, - "acc,exam_id__2016-20a": 0.3, - "acc,exam_id__2015-16": 0.45, - "acc,exam_id__2016-21": 0.425, - "acc,exam_id__2013-10": 0.3, - "acc,exam_id__2014-13": 0.35, - "acc,exam_id__2010-02": 0.37, - "acc,exam_id__2012-06": 0.425, - "acc,exam_id__2018-25": 0.3875, - "acc,exam_id__2011-04": 0.3375, - "acc,exam_id__2012-08": 0.4375, - "acc,exam_id__2015-18": 0.4125, - "acc,exam_id__2011-05": 0.4125, - "acc,exam_id__2012-09": 0.36363636363636365, - "acc,exam_id__2017-24": 0.45, - "acc,exam_id__2012-06a": 0.35, - "acc,exam_id__2016-20": 0.4625, - "acc,exam_id__2013-12": 0.5, - "acc,exam_id__2016-19": 0.4358974358974359, - "acc,exam_id__2014-14": 0.4375, - "acc,exam_id__2017-22": 0.45, - "acc,exam_id__2010-01": 0.35294117647058826, - "acc,exam_id__2011-03": 0.36363636363636365, - "acc,exam_id__2015-17": 0.47435897435897434, - "acc,exam_id__2017-23": 0.4, - "acc,exam_id__2013-11": 0.4625, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6878230465186987, - "acc,all": 0.7391304347826086 - }, - "tweetsentbr": { - "f1_macro,all": 0.5056575293768903, - "acc,all": 0.6940298507462687, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.8982291377095245, + "acc,all": 0.8982843137254902, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.6435083220682054, + "mse,all": 1.0219403594771241, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.46870653685674546, + "acc,exam_id__UNICAMP_2024": 0.5777777777777777, + "acc,exam_id__USP_2019": 0.35, + "acc,exam_id__UNICAMP_2019": 0.52, + "acc,exam_id__UNICAMP_2021_2": 0.47058823529411764, + "acc,exam_id__USP_2021": 0.38461538461538464, + "acc,exam_id__UNICAMP_2020": 0.4909090909090909, + "acc,exam_id__UNICAMP_2022": 0.5384615384615384, + "acc,exam_id__USP_2018": 0.42592592592592593, + "acc,exam_id__USP_2022": 0.42857142857142855, + "acc,exam_id__UNICAMP_2018": 0.4074074074074074, + "acc,exam_id__UNICAMP_2023": 0.5116279069767442, + "acc,exam_id__UNICAMP_2021_1": 0.34782608695652173, + "acc,exam_id__USP_2020": 0.4107142857142857, + "acc,exam_id__USP_2024": 0.6585365853658537, + "acc,exam_id__USP_2023": 0.5681818181818182, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.5801259622113366, + "acc,exam_id__2012": 0.5258620689655172, + "acc,exam_id__2017": 0.5775862068965517, + "acc,exam_id__2013": 0.6111111111111112, + "acc,exam_id__2016": 0.5702479338842975, + "acc,exam_id__2011": 0.6837606837606838, + "acc,exam_id__2015": 0.5630252100840336, + "acc,exam_id__2022": 0.5488721804511278, + "acc,exam_id__2014": 0.5779816513761468, + "acc,exam_id__2010": 0.5641025641025641, + "acc,exam_id__2009": 0.5130434782608696, + "acc,exam_id__2016_2": 0.5853658536585366, + "acc,exam_id__2023": 0.6370370370370371 + }, + "faquad_nli": { + "f1_macro,all": 0.3857639628131432, + "acc,all": 0.23076923076923078, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7439554115565462, + "acc,all": 0.7564285714285715 + }, + "oab_exams": { + "acc,all": 0.4054669703872437, + "acc,exam_id__2014-15": 0.48717948717948717, + "acc,exam_id__2012-07": 0.375, + "acc,exam_id__2016-20a": 0.3, + "acc,exam_id__2015-16": 0.45, + "acc,exam_id__2016-21": 0.425, + "acc,exam_id__2013-10": 0.3, + "acc,exam_id__2014-13": 0.35, + "acc,exam_id__2010-02": 0.37, + "acc,exam_id__2012-06": 0.425, + "acc,exam_id__2018-25": 0.3875, + "acc,exam_id__2011-04": 0.3375, + "acc,exam_id__2012-08": 0.4375, + "acc,exam_id__2015-18": 0.4125, + "acc,exam_id__2011-05": 0.4125, + "acc,exam_id__2012-09": 0.36363636363636365, + "acc,exam_id__2017-24": 0.45, + "acc,exam_id__2012-06a": 0.35, + "acc,exam_id__2016-20": 0.4625, + "acc,exam_id__2013-12": 0.5, + "acc,exam_id__2016-19": 0.4358974358974359, + "acc,exam_id__2014-14": 0.4375, + "acc,exam_id__2017-22": 
0.45, + "acc,exam_id__2010-01": 0.35294117647058826, + "acc,exam_id__2011-03": 0.36363636363636365, + "acc,exam_id__2015-17": 0.47435897435897434, + "acc,exam_id__2017-23": 0.4, + "acc,exam_id__2013-11": 0.4625, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6878230465186987, + "acc,all": 0.7391304347826086 + }, + "tweetsentbr": { + "f1_macro,all": 0.6742100391691871, + "acc,all": 0.6940298507462687, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "a994747e68972f9018cd454730174211f9e46736", - "model_dtype": "torch.float16", - "model_memory_footprint": 15020376064, - "model_num_parameters": 7241748480, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 64, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1531.7455065359477, - "min_seq_length": 1508, - "max_seq_length": 1598, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1740.7455065359477, - "min_seq_length": 1717, - "max_seq_length": 1807, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1761.9262865090404, - "min_seq_length": 1385, - "max_seq_length": 2562, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1662.039188243527, - "min_seq_length": 1396, - "max_seq_length": 2660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1756.9876923076922, - "min_seq_length": 1701, - "max_seq_length": 1877, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "a994747e68972f9018cd454730174211f9e46736", + "model_dtype": "torch.float16", + "model_memory_footprint": 15020376064, + "model_num_parameters": 7241748480, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 64, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1567.3878571428572, - "min_seq_length": 1544, - "max_seq_length": 1818, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1407.764464692483, - "min_seq_length": 1141, - "max_seq_length": 1910, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1531.7455065359477, + "min_seq_length": 1508, + "max_seq_length": 1598, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1740.7455065359477, + "min_seq_length": 1717, + "max_seq_length": 1807, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1761.9262865090404, + "min_seq_length": 1385, + "max_seq_length": 2562, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1662.039188243527, + "min_seq_length": 1396, + "max_seq_length": 
2660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1756.9876923076922, + "min_seq_length": 1701, + "max_seq_length": 1877, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1567.3878571428572, + "min_seq_length": 1544, + "max_seq_length": 1818, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1407.764464692483, + "min_seq_length": 1141, + "max_seq_length": 1910, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2068.3360752056406, + "min_seq_length": 2033, + "max_seq_length": 2107, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1814.2492537313433, + "min_seq_length": 1793, + "max_seq_length": 1909, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2068.3360752056406, - "min_seq_length": 2033, - "max_seq_length": 2107, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=maywell/Synatra-7B-v0.3-RP,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1814.2492537313433, - "min_seq_length": 1793, - "max_seq_length": 1909, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=maywell/Synatra-7B-v0.3-RP,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 
0, - "gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/maywell/Synatra-7B-v0.3-RP/results_2024-05-18T21-35-16.166862.json b/maywell/Synatra-7B-v0.3-RP/results_2024-05-18T21-35-16.166862.json index 24d4a6baa2826433c35a40021692384062cdb795..2e5905e570a56762ee4b63e3f4eef8da7da17ca0 100644 --- a/maywell/Synatra-7B-v0.3-RP/results_2024-05-18T21-35-16.166862.json +++ b/maywell/Synatra-7B-v0.3-RP/results_2024-05-18T21-35-16.166862.json @@ -34,18 +34,18 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.5767387657659948, - "all_grouped_npm": 0.3582757986709195, + "all_grouped_average": 0.609754376587848, + "all_grouped_npm": 0.4124088169381254, "all_grouped": { "enem_challenge": 0.5801259622113366, "bluex": 0.46870653685674546, "oab_exams": 0.4054669703872437, "assin2_rte": 0.8982291377095245, "assin2_sts": 0.6435083220682054, - "faquad_nli": 0.2571759752087621, + "faquad_nli": 0.3857639628131432, "hatebr_offensive": 0.7439554115565462, "portuguese_hate_speech": 0.6878230465186987, - "tweetsentbr": 0.5056575293768903 + "tweetsentbr": 0.6742100391691871 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.5801259622113366, @@ -53,10 +53,10 @@ "harness|oab_exams|oab_exams|None|3": 0.4054669703872437, "harness|assin2_rte|assin2_rte|None|15": 0.8982291377095245, "harness|assin2_sts|assin2_sts|None|15": 0.6435083220682054, - "harness|faquad_nli|faquad_nli|None|15": 0.2571759752087621, + "harness|faquad_nli|faquad_nli|None|15": 0.3857639628131432, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7439554115565462, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6878230465186987, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5056575293768903 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6742100391691871 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.5801259622113366, @@ -135,9 +135,9 @@ "main_score": 0.6435083220682054 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.2571759752087621, + "f1_macro,all": 0.3857639628131432, "acc,all": 0.23076923076923078, - "main_score": 0.2571759752087621 + "main_score": 0.3857639628131432 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { "f1_macro,all": 0.7439554115565462, @@ -150,9 +150,9 @@ "main_score": 0.6878230465186987 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5056575293768903, + "f1_macro,all": 0.6742100391691871, "acc,all": 0.6940298507462687, - "main_score": 0.5056575293768903 + "main_score": 0.6742100391691871 } }, "config_tasks": { diff --git a/meraGPT/mera-mix-4x7B/raw_2024-06-13T04-11-32.982548/results.json b/meraGPT/mera-mix-4x7B/raw_2024-06-13T04-11-32.982548/results.json index 9d80a377c698655456891f6eb6eedcd2eb92ae6b..3742d345891d31d1c21d46c936648b7c0707208a 100644 --- a/meraGPT/mera-mix-4x7B/raw_2024-06-13T04-11-32.982548/results.json +++ b/meraGPT/mera-mix-4x7B/raw_2024-06-13T04-11-32.982548/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9215686274509803, - "acc,all": 0.9215686274509803, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7819146834745879, - "mse,all": 0.4272753267973856, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5507649513212796, - "acc,exam_id__UNICAMP_2023": 0.627906976744186, - "acc,exam_id__UNICAMP_2022": 0.5897435897435898, - "acc,exam_id__USP_2018": 0.46296296296296297, - "acc,exam_id__USP_2023": 0.6363636363636364, - "acc,exam_id__USP_2024": 0.7560975609756098, - 
"acc,exam_id__UNICAMP_2018": 0.5370370370370371, - "acc,exam_id__USP_2019": 0.475, - "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, - "acc,exam_id__UNICAMP_2024": 0.4888888888888889, - "acc,exam_id__UNICAMP_2019": 0.56, - "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, - "acc,exam_id__USP_2021": 0.5, - "acc,exam_id__USP_2022": 0.4489795918367347, - "acc,exam_id__UNICAMP_2020": 0.6, - "acc,exam_id__USP_2020": 0.5178571428571429, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6396081175647306, - "acc,exam_id__2022": 0.6015037593984962, - "acc,exam_id__2016_2": 0.6260162601626016, - "acc,exam_id__2012": 0.6206896551724138, - "acc,exam_id__2023": 0.6370370370370371, - "acc,exam_id__2015": 0.6134453781512605, - "acc,exam_id__2017": 0.6810344827586207, - "acc,exam_id__2014": 0.6238532110091743, - "acc,exam_id__2016": 0.5867768595041323, - "acc,exam_id__2013": 0.6851851851851852, - "acc,exam_id__2010": 0.6666666666666666, - "acc,exam_id__2011": 0.6666666666666666, - "acc,exam_id__2009": 0.6782608695652174 - }, - "faquad_nli": { - "f1_macro,all": 0.7733499377334994, - "acc,all": 0.816923076923077, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8551991680384323, - "acc,all": 0.8564285714285714 - }, - "oab_exams": { - "acc,all": 0.4132118451025057, - "acc,exam_id__2012-06a": 0.35, - "acc,exam_id__2016-19": 0.48717948717948717, - "acc,exam_id__2014-14": 0.5375, - "acc,exam_id__2014-15": 0.46153846153846156, - "acc,exam_id__2011-04": 0.4, - "acc,exam_id__2018-25": 0.425, - "acc,exam_id__2014-13": 0.325, - "acc,exam_id__2010-02": 0.42, - "acc,exam_id__2012-07": 0.3875, - "acc,exam_id__2013-11": 0.45, - "acc,exam_id__2015-17": 0.5, - "acc,exam_id__2011-05": 0.425, - "acc,exam_id__2011-03": 0.3434343434343434, - "acc,exam_id__2016-21": 0.375, - "acc,exam_id__2017-22": 0.575, - "acc,exam_id__2016-20a": 0.3, - "acc,exam_id__2012-09": 0.33766233766233766, - "acc,exam_id__2016-20": 0.3625, - "acc,exam_id__2015-16": 0.35, - "acc,exam_id__2017-23": 0.45, - "acc,exam_id__2013-10": 0.425, - "acc,exam_id__2013-12": 0.4375, - "acc,exam_id__2010-01": 0.35294117647058826, - "acc,exam_id__2015-18": 0.4125, - "acc,exam_id__2017-24": 0.35, - "acc,exam_id__2012-08": 0.45, - "acc,exam_id__2012-06": 0.4875, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6733010180074539, - "acc,all": 0.6980023501762632 - }, - "tweetsentbr": { - "f1_macro,all": 0.48562285602515487, - "acc,all": 0.6980099502487562, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9215686274509803, + "acc,all": 0.9215686274509803, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7819146834745879, + "mse,all": 0.4272753267973856, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5507649513212796, + "acc,exam_id__UNICAMP_2023": 0.627906976744186, + "acc,exam_id__UNICAMP_2022": 0.5897435897435898, + "acc,exam_id__USP_2018": 0.46296296296296297, + "acc,exam_id__USP_2023": 0.6363636363636364, + "acc,exam_id__USP_2024": 0.7560975609756098, + "acc,exam_id__UNICAMP_2018": 0.5370370370370371, + "acc,exam_id__USP_2019": 0.475, + "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, + "acc,exam_id__UNICAMP_2024": 0.4888888888888889, + "acc,exam_id__UNICAMP_2019": 0.56, + "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, + "acc,exam_id__USP_2021": 0.5, + "acc,exam_id__USP_2022": 0.4489795918367347, + "acc,exam_id__UNICAMP_2020": 0.6, + "acc,exam_id__USP_2020": 0.5178571428571429, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6396081175647306, + "acc,exam_id__2022": 0.6015037593984962, + "acc,exam_id__2016_2": 0.6260162601626016, + "acc,exam_id__2012": 0.6206896551724138, + "acc,exam_id__2023": 0.6370370370370371, + "acc,exam_id__2015": 0.6134453781512605, + "acc,exam_id__2017": 0.6810344827586207, + "acc,exam_id__2014": 0.6238532110091743, + "acc,exam_id__2016": 0.5867768595041323, + "acc,exam_id__2013": 0.6851851851851852, + "acc,exam_id__2010": 0.6666666666666666, + "acc,exam_id__2011": 0.6666666666666666, + "acc,exam_id__2009": 0.6782608695652174 + }, + "faquad_nli": { + "f1_macro,all": 0.7733499377334994, + "acc,all": 0.816923076923077, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8551991680384323, + "acc,all": 0.8564285714285714 + }, + "oab_exams": { + "acc,all": 0.4132118451025057, + "acc,exam_id__2012-06a": 0.35, + "acc,exam_id__2016-19": 0.48717948717948717, + "acc,exam_id__2014-14": 0.5375, + "acc,exam_id__2014-15": 0.46153846153846156, + "acc,exam_id__2011-04": 0.4, + "acc,exam_id__2018-25": 0.425, + "acc,exam_id__2014-13": 0.325, + "acc,exam_id__2010-02": 0.42, + "acc,exam_id__2012-07": 0.3875, + "acc,exam_id__2013-11": 0.45, + "acc,exam_id__2015-17": 0.5, + "acc,exam_id__2011-05": 0.425, + "acc,exam_id__2011-03": 0.3434343434343434, + "acc,exam_id__2016-21": 0.375, + "acc,exam_id__2017-22": 0.575, + "acc,exam_id__2016-20a": 0.3, + "acc,exam_id__2012-09": 0.33766233766233766, + "acc,exam_id__2016-20": 0.3625, + "acc,exam_id__2015-16": 0.35, + "acc,exam_id__2017-23": 0.45, + "acc,exam_id__2013-10": 0.425, + "acc,exam_id__2013-12": 0.4375, + 
"acc,exam_id__2010-01": 0.35294117647058826, + "acc,exam_id__2015-18": 0.4125, + "acc,exam_id__2017-24": 0.35, + "acc,exam_id__2012-08": 0.45, + "acc,exam_id__2012-06": 0.4875, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6733010180074539, + "acc,all": 0.6980023501762632 + }, + "tweetsentbr": { + "f1_macro,all": 0.6474971413668732, + "acc,all": 0.6980099502487562, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "09d965c5ef9b66ce419986027e03a915cb869e43", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 48844259328, - "model_num_parameters": 24153690112, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 
1620.039188243527, - "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "09d965c5ef9b66ce419986027e03a915cb869e43", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 48844259328, + "model_num_parameters": 24153690112, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=meraGPT/mera-mix-4x7B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=meraGPT/mera-mix-4x7B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - 
"git_hash": "f2a0116" + "git_hash": "f2a0116" } \ No newline at end of file diff --git a/meraGPT/mera-mix-4x7B/results_2024-06-13T04-11-32.982548.json b/meraGPT/mera-mix-4x7B/results_2024-06-13T04-11-32.982548.json index 0ae82f6eeb020e4d241b659124d5e80f4a343e27..5b07a84f7e2382082aa84fba0fb7c5f9baa629a1 100644 --- a/meraGPT/mera-mix-4x7B/results_2024-06-13T04-11-32.982548.json +++ b/meraGPT/mera-mix-4x7B/results_2024-06-13T04-11-32.982548.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.677171244968736, - "all_grouped_npm": 0.5237531532626083, + "all_grouped_average": 0.6951572766733715, + "all_grouped_npm": 0.5505180813945062, "all_grouped": { "enem_challenge": 0.6396081175647306, "bluex": 0.5507649513212796, @@ -45,7 +45,7 @@ "faquad_nli": 0.7733499377334994, "hatebr_offensive": 0.8551991680384323, "portuguese_hate_speech": 0.6733010180074539, - "tweetsentbr": 0.48562285602515487 + "tweetsentbr": 0.6474971413668732 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6396081175647306, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7733499377334994, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8551991680384323, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6733010180074539, - "harness|tweetsentbr|tweetsentbr|None|25": 0.48562285602515487 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6474971413668732 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6396081175647306, @@ -150,9 +150,9 @@ "main_score": 0.6733010180074539 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.48562285602515487, + "f1_macro,all": 0.6474971413668732, "acc,all": 0.6980099502487562, - "main_score": 0.48562285602515487 + "main_score": 0.6474971413668732 } }, "config_tasks": { diff --git a/meta-llama/Llama-2-7b-chat-hf/raw_2024-04-20T05-48-24.116596/results.json b/meta-llama/Llama-2-7b-chat-hf/raw_2024-04-20T05-48-24.116596/results.json index 363e2bad9956fa6a639e41ea0d424979303ca8b9..cd8a3ad6ccb5be302b249f4b53d38dd8c6d8523d 100644 --- a/meta-llama/Llama-2-7b-chat-hf/raw_2024-04-20T05-48-24.116596/results.json +++ b/meta-llama/Llama-2-7b-chat-hf/raw_2024-04-20T05-48-24.116596/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.5127860216587118, - "acc,all": 0.7671568627450981, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.5255824308244532, - "mse,all": 1.2552941176470587, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.2906815020862309, - "acc,exam_id__USP_2023": 0.29545454545454547, - "acc,exam_id__UNICAMP_2019": 0.32, - "acc,exam_id__UNICAMP_2023": 0.3488372093023256, - "acc,exam_id__UNICAMP_2022": 0.3333333333333333, - "acc,exam_id__UNICAMP_2024": 0.4222222222222222, - "acc,exam_id__USP_2018": 0.2222222222222222, - "acc,exam_id__USP_2022": 0.14285714285714285, - "acc,exam_id__USP_2020": 0.25, - "acc,exam_id__USP_2019": 0.2, - "acc,exam_id__USP_2024": 0.24390243902439024, - "acc,exam_id__UNICAMP_2018": 0.35185185185185186, - "acc,exam_id__UNICAMP_2021_2": 0.39215686274509803, - "acc,exam_id__UNICAMP_2020": 0.38181818181818183, - "acc,exam_id__USP_2021": 0.23076923076923078, - "acc,exam_id__UNICAMP_2021_1": 0.21739130434782608, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.2351294611616515, - "acc,exam_id__2014": 0.24770642201834864, - "acc,exam_id__2017": 0.23275862068965517, - "acc,exam_id__2009": 0.25217391304347825, - "acc,exam_id__2016": 0.2066115702479339, - "acc,exam_id__2013": 
0.17592592592592593, - "acc,exam_id__2015": 0.23529411764705882, - "acc,exam_id__2016_2": 0.24390243902439024, - "acc,exam_id__2023": 0.18518518518518517, - "acc,exam_id__2011": 0.23931623931623933, - "acc,exam_id__2022": 0.24060150375939848, - "acc,exam_id__2012": 0.33620689655172414, - "acc,exam_id__2010": 0.23076923076923078 - }, - "faquad_nli": { - "f1_macro,all": 0.5483679076695784, - "acc,all": 0.5615384615384615, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.5716817966023459, - "acc,all": 0.8557142857142858 - }, - "oab_exams": { - "acc,all": 0.30660592255125285, - "acc,exam_id__2012-06a": 0.325, - "acc,exam_id__2013-10": 0.2625, - "acc,exam_id__2010-02": 0.35, - "acc,exam_id__2016-21": 0.3625, - "acc,exam_id__2014-13": 0.25, - "acc,exam_id__2014-15": 0.32051282051282054, - "acc,exam_id__2017-24": 0.2625, - "acc,exam_id__2016-20": 0.3125, - "acc,exam_id__2018-25": 0.2125, - "acc,exam_id__2010-01": 0.2823529411764706, - "acc,exam_id__2015-16": 0.325, - "acc,exam_id__2016-20a": 0.25, - "acc,exam_id__2011-05": 0.3, - "acc,exam_id__2013-11": 0.275, - "acc,exam_id__2013-12": 0.3375, - "acc,exam_id__2014-14": 0.275, - "acc,exam_id__2016-19": 0.38461538461538464, - "acc,exam_id__2015-18": 0.3625, - "acc,exam_id__2017-23": 0.35, - "acc,exam_id__2015-17": 0.2692307692307692, - "acc,exam_id__2011-04": 0.325, - "acc,exam_id__2011-03": 0.2727272727272727, - "acc,exam_id__2012-06": 0.3, - "acc,exam_id__2012-08": 0.35, - "acc,exam_id__2017-22": 0.2875, - "acc,exam_id__2012-07": 0.3375, - "acc,exam_id__2012-09": 0.33766233766233766, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.4143061646309523, - "acc,all": 0.6298472385428907 - }, - "tweetsentbr": { - "f1_macro,all": 0.4074313889184773, - "acc,all": 0.6388059701492538, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.7691790324880676, + "acc,all": 0.7671568627450981, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.5255824308244532, + "mse,all": 1.2552941176470587, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.2906815020862309, + "acc,exam_id__USP_2023": 0.29545454545454547, + "acc,exam_id__UNICAMP_2019": 0.32, + "acc,exam_id__UNICAMP_2023": 0.3488372093023256, + "acc,exam_id__UNICAMP_2022": 0.3333333333333333, + "acc,exam_id__UNICAMP_2024": 0.4222222222222222, + "acc,exam_id__USP_2018": 0.2222222222222222, + "acc,exam_id__USP_2022": 0.14285714285714285, + "acc,exam_id__USP_2020": 0.25, + "acc,exam_id__USP_2019": 0.2, + "acc,exam_id__USP_2024": 0.24390243902439024, + "acc,exam_id__UNICAMP_2018": 0.35185185185185186, + "acc,exam_id__UNICAMP_2021_2": 0.39215686274509803, + "acc,exam_id__UNICAMP_2020": 0.38181818181818183, + "acc,exam_id__USP_2021": 0.23076923076923078, + "acc,exam_id__UNICAMP_2021_1": 0.21739130434782608, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.2351294611616515, + "acc,exam_id__2014": 0.24770642201834864, + "acc,exam_id__2017": 0.23275862068965517, + "acc,exam_id__2009": 0.25217391304347825, + "acc,exam_id__2016": 0.2066115702479339, + "acc,exam_id__2013": 0.17592592592592593, + "acc,exam_id__2015": 0.23529411764705882, + "acc,exam_id__2016_2": 0.24390243902439024, + "acc,exam_id__2023": 0.18518518518518517, + "acc,exam_id__2011": 0.23931623931623933, + "acc,exam_id__2022": 0.24060150375939848, + "acc,exam_id__2012": 0.33620689655172414, + "acc,exam_id__2010": 0.23076923076923078 + }, + "faquad_nli": { + "f1_macro,all": 0.5483679076695784, + "acc,all": 0.5615384615384615, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8575226949035188, + "acc,all": 0.8557142857142858 + }, + "oab_exams": { + "acc,all": 0.30660592255125285, + "acc,exam_id__2012-06a": 0.325, + "acc,exam_id__2013-10": 0.2625, + "acc,exam_id__2010-02": 0.35, + "acc,exam_id__2016-21": 0.3625, + "acc,exam_id__2014-13": 0.25, + "acc,exam_id__2014-15": 0.32051282051282054, + "acc,exam_id__2017-24": 0.2625, + "acc,exam_id__2016-20": 0.3125, + "acc,exam_id__2018-25": 0.2125, + "acc,exam_id__2010-01": 0.2823529411764706, + "acc,exam_id__2015-16": 0.325, + "acc,exam_id__2016-20a": 0.25, + "acc,exam_id__2011-05": 0.3, + "acc,exam_id__2013-11": 0.275, + "acc,exam_id__2013-12": 0.3375, + "acc,exam_id__2014-14": 0.275, + "acc,exam_id__2016-19": 0.38461538461538464, + "acc,exam_id__2015-18": 0.3625, + "acc,exam_id__2017-23": 0.35, + "acc,exam_id__2015-17": 0.2692307692307692, + "acc,exam_id__2011-04": 0.325, + 
"acc,exam_id__2011-03": 0.2727272727272727, + "acc,exam_id__2012-06": 0.3, + "acc,exam_id__2012-08": 0.35, + "acc,exam_id__2017-22": 0.2875, + "acc,exam_id__2012-07": 0.3375, + "acc,exam_id__2012-09": 0.33766233766233766, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6214592469464285, + "acc,all": 0.6298472385428907 + }, + "tweetsentbr": { + "f1_macro,all": 0.543241851891303, + "acc,all": 0.6388059701492538, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "f5db02db724555f92da89c216ac04704f23d4590", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 13543948288, - "model_num_parameters": 6738415616, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1426.9889705882354, - "min_seq_length": 1404, - "max_seq_length": 1493, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1665.9889705882354, - "min_seq_length": 1643, - "max_seq_length": 1732, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1710.7426981919332, - "min_seq_length": 1344, - "max_seq_length": 2470, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 
1589.9881035689293, - "min_seq_length": 1337, - "max_seq_length": 2629, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1649.1184615384616, - "min_seq_length": 1597, - "max_seq_length": 1756, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "f5db02db724555f92da89c216ac04704f23d4590", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 13543948288, + "model_num_parameters": 6738415616, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1486.9178571428572, - "min_seq_length": 1463, - "max_seq_length": 1733, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1362.4145785876992, - "min_seq_length": 1107, - "max_seq_length": 1844, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1426.9889705882354, + "min_seq_length": 1404, + "max_seq_length": 1493, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1665.9889705882354, + "min_seq_length": 1643, + "max_seq_length": 1732, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1710.7426981919332, + "min_seq_length": 1344, + "max_seq_length": 2470, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1589.9881035689293, + "min_seq_length": 1337, + "max_seq_length": 2629, + "max_ctx_length": 
2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1649.1184615384616, + "min_seq_length": 1597, + "max_seq_length": 1756, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1486.9178571428572, + "min_seq_length": 1463, + "max_seq_length": 1733, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1362.4145785876992, + "min_seq_length": 1107, + "max_seq_length": 1844, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1978.801410105758, + "min_seq_length": 1944, + "max_seq_length": 2022, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1929.6845771144278, + "min_seq_length": 1908, + "max_seq_length": 2047, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1978.801410105758, - "min_seq_length": 1944, - "max_seq_length": 2022, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=meta-llama/Llama-2-7b-chat-hf,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1929.6845771144278, - "min_seq_length": 1908, - "max_seq_length": 2047, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=meta-llama/Llama-2-7b-chat-hf,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": 
null - }, - "git_hash": "0e4d6ae" + "git_hash": "0e4d6ae" } \ No newline at end of file diff --git a/meta-llama/Llama-2-7b-chat-hf/results_2024-04-20T05-48-24.116596.json b/meta-llama/Llama-2-7b-chat-hf/results_2024-04-20T05-48-24.116596.json index c42cc6d0341af8d12922c6566b0c50a92884c324..d2da2f176c646edc3e0b47bd9d70c6bdf757ab2b 100644 --- a/meta-llama/Llama-2-7b-chat-hf/results_2024-04-20T05-48-24.116596.json +++ b/meta-llama/Llama-2-7b-chat-hf/results_2024-04-20T05-48-24.116596.json @@ -34,29 +34,29 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.4236191773448505, - "all_grouped_npm": 0.11805308591870865, + "all_grouped_average": 0.5219744500580539, + "all_grouped_npm": 0.3051834642966301, "all_grouped": { "enem_challenge": 0.2351294611616515, "bluex": 0.2906815020862309, "oab_exams": 0.30660592255125285, - "assin2_rte": 0.5127860216587118, + "assin2_rte": 0.7691790324880676, "assin2_sts": 0.5255824308244532, "faquad_nli": 0.5483679076695784, - "hatebr_offensive": 0.5716817966023459, - "portuguese_hate_speech": 0.4143061646309523, - "tweetsentbr": 0.4074313889184773 + "hatebr_offensive": 0.8575226949035188, + "portuguese_hate_speech": 0.6214592469464285, + "tweetsentbr": 0.543241851891303 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.2351294611616515, "harness|bluex|bluex|None|3": 0.2906815020862309, "harness|oab_exams|oab_exams|None|3": 0.30660592255125285, - "harness|assin2_rte|assin2_rte|None|15": 0.5127860216587118, + "harness|assin2_rte|assin2_rte|None|15": 0.7691790324880676, "harness|assin2_sts|assin2_sts|None|15": 0.5255824308244532, "harness|faquad_nli|faquad_nli|None|15": 0.5483679076695784, - "harness|hatebr_offensive|hatebr_offensive|None|25": 0.5716817966023459, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.4143061646309523, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4074313889184773 + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8575226949035188, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6214592469464285, + "harness|tweetsentbr|tweetsentbr|None|25": 0.543241851891303 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.2351294611616515, @@ -125,9 +125,9 @@ "main_score": 0.30660592255125285 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.5127860216587118, + "f1_macro,all": 0.7691790324880676, "acc,all": 0.7671568627450981, - "main_score": 0.5127860216587118 + "main_score": 0.7691790324880676 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.5255824308244532, @@ -140,19 +140,19 @@ "main_score": 0.5483679076695784 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.5716817966023459, + "f1_macro,all": 0.8575226949035188, "acc,all": 0.8557142857142858, - "main_score": 0.5716817966023459 + "main_score": 0.8575226949035188 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.4143061646309523, + "f1_macro,all": 0.6214592469464285, "acc,all": 0.6298472385428907, - "main_score": 0.4143061646309523 + "main_score": 0.6214592469464285 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4074313889184773, + "f1_macro,all": 0.543241851891303, "acc,all": 0.6388059701492538, - "main_score": 0.4074313889184773 + "main_score": 0.543241851891303 } }, "config_tasks": { diff --git a/microsoft/Phi-3-medium-4k-instruct/raw_2024-05-21T17-06-43.203552/results.json b/microsoft/Phi-3-medium-4k-instruct/raw_2024-05-21T17-06-43.203552/results.json index 
27777d4beb1d1b29d860da625e6e596169feb8d3..7b93920c2eb9a52a5e66a02bdf0c0d1428a26b65 100644 --- a/microsoft/Phi-3-medium-4k-instruct/raw_2024-05-21T17-06-43.203552/results.json +++ b/microsoft/Phi-3-medium-4k-instruct/raw_2024-05-21T17-06-43.203552/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9354260952140543, - "acc,all": 0.9354575163398693, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7031484731508717, - "mse,all": 0.8242647058823528, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.6244784422809457, - "acc,exam_id__USP_2022": 0.5918367346938775, - "acc,exam_id__USP_2018": 0.5925925925925926, - "acc,exam_id__USP_2024": 0.7560975609756098, - "acc,exam_id__USP_2020": 0.6428571428571429, - "acc,exam_id__UNICAMP_2019": 0.66, - "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, - "acc,exam_id__USP_2021": 0.6153846153846154, - "acc,exam_id__USP_2019": 0.55, - "acc,exam_id__UNICAMP_2018": 0.5370370370370371, - "acc,exam_id__UNICAMP_2022": 0.6923076923076923, - "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, - "acc,exam_id__UNICAMP_2024": 0.6, - "acc,exam_id__UNICAMP_2020": 0.6727272727272727, - "acc,exam_id__UNICAMP_2023": 0.6046511627906976, - "acc,exam_id__USP_2023": 0.75, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.7368789363191043, - "acc,exam_id__2022": 0.6917293233082706, - "acc,exam_id__2012": 0.7758620689655172, - "acc,exam_id__2014": 0.7339449541284404, - "acc,exam_id__2017": 0.7241379310344828, - "acc,exam_id__2016_2": 0.7398373983739838, - "acc,exam_id__2009": 0.6869565217391305, - "acc,exam_id__2015": 0.7394957983193278, - "acc,exam_id__2016": 0.71900826446281, - "acc,exam_id__2023": 0.7851851851851852, - "acc,exam_id__2013": 0.6944444444444444, - "acc,exam_id__2011": 0.8034188034188035, - "acc,exam_id__2010": 0.7435897435897436 - }, - "faquad_nli": { - "f1_macro,all": 0.7501808645324652, - "acc,all": 0.7907692307692308, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8307624651156005, - "acc,all": 0.8342857142857143 - }, - "oab_exams": { - "acc,all": 0.5198177676537585, - "acc,exam_id__2010-01": 0.32941176470588235, - "acc,exam_id__2014-13": 0.45, - "acc,exam_id__2015-16": 0.525, - "acc,exam_id__2014-15": 0.5897435897435898, - "acc,exam_id__2012-06a": 0.5375, - "acc,exam_id__2016-19": 0.5, - "acc,exam_id__2013-10": 0.4875, - "acc,exam_id__2016-20a": 0.4875, - "acc,exam_id__2014-14": 0.5375, - "acc,exam_id__2012-07": 0.55, - "acc,exam_id__2012-09": 0.4155844155844156, - "acc,exam_id__2013-11": 0.575, - "acc,exam_id__2015-18": 0.5125, - "acc,exam_id__2016-21": 0.475, - "acc,exam_id__2012-08": 0.65, - "acc,exam_id__2011-03": 0.48484848484848486, - "acc,exam_id__2013-12": 0.55, - "acc,exam_id__2011-04": 0.475, - "acc,exam_id__2015-17": 0.5641025641025641, - "acc,exam_id__2011-05": 0.625, - "acc,exam_id__2018-25": 0.475, - "acc,exam_id__2010-02": 0.56, - "acc,exam_id__2016-20": 0.5, - "acc,exam_id__2017-24": 0.5375, - "acc,exam_id__2017-22": 0.5625, - "acc,exam_id__2017-23": 0.5125, - "acc,exam_id__2012-06": 0.575, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7397953683034728, - "acc,all": 0.7967097532314924 - }, - "tweetsentbr": { - "f1_macro,all": 0.4972201091980745, - "acc,all": 0.7054726368159204, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - 
"dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9354260952140543, + "acc,all": 0.9354575163398693, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7031484731508717, + "mse,all": 0.8242647058823528, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.6244784422809457, + "acc,exam_id__USP_2022": 0.5918367346938775, + "acc,exam_id__USP_2018": 0.5925925925925926, + "acc,exam_id__USP_2024": 0.7560975609756098, + "acc,exam_id__USP_2020": 0.6428571428571429, + "acc,exam_id__UNICAMP_2019": 0.66, + "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, + "acc,exam_id__USP_2021": 0.6153846153846154, + "acc,exam_id__USP_2019": 0.55, + "acc,exam_id__UNICAMP_2018": 0.5370370370370371, + "acc,exam_id__UNICAMP_2022": 0.6923076923076923, + "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, + "acc,exam_id__UNICAMP_2024": 0.6, + "acc,exam_id__UNICAMP_2020": 0.6727272727272727, + "acc,exam_id__UNICAMP_2023": 0.6046511627906976, + "acc,exam_id__USP_2023": 0.75, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.7368789363191043, + "acc,exam_id__2022": 0.6917293233082706, + "acc,exam_id__2012": 0.7758620689655172, + "acc,exam_id__2014": 0.7339449541284404, + "acc,exam_id__2017": 0.7241379310344828, + "acc,exam_id__2016_2": 0.7398373983739838, + "acc,exam_id__2009": 0.6869565217391305, + "acc,exam_id__2015": 0.7394957983193278, + "acc,exam_id__2016": 0.71900826446281, + "acc,exam_id__2023": 0.7851851851851852, + "acc,exam_id__2013": 0.6944444444444444, + "acc,exam_id__2011": 0.8034188034188035, + "acc,exam_id__2010": 0.7435897435897436 + }, + "faquad_nli": { + "f1_macro,all": 0.7501808645324652, + "acc,all": 0.7907692307692308, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8307624651156005, + "acc,all": 0.8342857142857143 + }, + "oab_exams": { + "acc,all": 0.5198177676537585, + "acc,exam_id__2010-01": 0.32941176470588235, + "acc,exam_id__2014-13": 0.45, + "acc,exam_id__2015-16": 0.525, + "acc,exam_id__2014-15": 0.5897435897435898, + "acc,exam_id__2012-06a": 0.5375, + "acc,exam_id__2016-19": 0.5, + "acc,exam_id__2013-10": 0.4875, + "acc,exam_id__2016-20a": 0.4875, + "acc,exam_id__2014-14": 0.5375, + "acc,exam_id__2012-07": 0.55, + "acc,exam_id__2012-09": 0.4155844155844156, + "acc,exam_id__2013-11": 0.575, + 
"acc,exam_id__2015-18": 0.5125, + "acc,exam_id__2016-21": 0.475, + "acc,exam_id__2012-08": 0.65, + "acc,exam_id__2011-03": 0.48484848484848486, + "acc,exam_id__2013-12": 0.55, + "acc,exam_id__2011-04": 0.475, + "acc,exam_id__2015-17": 0.5641025641025641, + "acc,exam_id__2011-05": 0.625, + "acc,exam_id__2018-25": 0.475, + "acc,exam_id__2010-02": 0.56, + "acc,exam_id__2016-20": 0.5, + "acc,exam_id__2017-24": 0.5375, + "acc,exam_id__2017-22": 0.5625, + "acc,exam_id__2017-23": 0.5125, + "acc,exam_id__2012-06": 0.575, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7397953683034728, + "acc,all": 0.7967097532314924 + }, + "tweetsentbr": { + "f1_macro,all": 0.6629601455974328, + "acc,all": 0.7054726368159204, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "bbf1ecfd43e264d27c821771254f13ea9ee97b20", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 27920476160, - "model_num_parameters": 13960238080, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1268.9889705882354, - "min_seq_length": 1246, - "max_seq_length": 1335, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1499.9889705882354, - "min_seq_length": 1477, - "max_seq_length": 1566, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1609.7426981919332, - "min_seq_length": 1243, - "max_seq_length": 2369, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1488.9881035689293, - "min_seq_length": 1236, - "max_seq_length": 2528, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1478.1184615384616, - "min_seq_length": 1426, - "max_seq_length": 1585, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "bbf1ecfd43e264d27c821771254f13ea9ee97b20", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 27920476160, + "model_num_parameters": 13960238080, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1262.9178571428572, - "min_seq_length": 1239, - "max_seq_length": 1509, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1258.4145785876992, - "min_seq_length": 1003, - "max_seq_length": 1740, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1268.9889705882354, + "min_seq_length": 1246, + "max_seq_length": 1335, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1499.9889705882354, + "min_seq_length": 1477, + "max_seq_length": 1566, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1609.7426981919332, + "min_seq_length": 1243, + "max_seq_length": 2369, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1488.9881035689293, + "min_seq_length": 1236, + "max_seq_length": 2528, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1478.1184615384616, + "min_seq_length": 1426, + "max_seq_length": 1585, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1262.9178571428572, + "min_seq_length": 1239, + "max_seq_length": 1509, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1258.4145785876992, + "min_seq_length": 1003, + "max_seq_length": 1740, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1751.801410105758, + "min_seq_length": 1717, + "max_seq_length": 1795, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1518.6845771144278, + "min_seq_length": 1497, + "max_seq_length": 1636, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1751.801410105758, - "min_seq_length": 1717, - "max_seq_length": 1795, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=microsoft/Phi-3-medium-4k-instruct,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1518.6845771144278, - "min_seq_length": 1497, - "max_seq_length": 1636, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=microsoft/Phi-3-medium-4k-instruct,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - 
"git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/microsoft/Phi-3-medium-4k-instruct/results_2024-05-21T17-06-43.203552.json b/microsoft/Phi-3-medium-4k-instruct/results_2024-05-21T17-06-43.203552.json index ecb40e826d5a3f6d13a9ef22b489684b1015a907..be64d35b2d3845de3c7610d10b3918336bba5a14 100644 --- a/microsoft/Phi-3-medium-4k-instruct/results_2024-05-21T17-06-43.203552.json +++ b/microsoft/Phi-3-medium-4k-instruct/results_2024-05-21T17-06-43.203552.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.7041898357520386, - "all_grouped_npm": 0.5638881727505701, + "all_grouped_average": 0.7226053953519673, + "all_grouped_npm": 0.591292279298083, "all_grouped": { "enem_challenge": 0.7368789363191043, "bluex": 0.6244784422809457, @@ -45,7 +45,7 @@ "faquad_nli": 0.7501808645324652, "hatebr_offensive": 0.8307624651156005, "portuguese_hate_speech": 0.7397953683034728, - "tweetsentbr": 0.4972201091980745 + "tweetsentbr": 0.6629601455974328 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.7368789363191043, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7501808645324652, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8307624651156005, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7397953683034728, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4972201091980745 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6629601455974328 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.7368789363191043, @@ -150,9 +150,9 @@ "main_score": 0.7397953683034728 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4972201091980745, + "f1_macro,all": 0.6629601455974328, "acc,all": 0.7054726368159204, - "main_score": 0.4972201091980745 + "main_score": 0.6629601455974328 } }, "config_tasks": { diff --git a/microsoft/phi-1_5/raw_2024-03-08T11-28-41.393728/results.json b/microsoft/phi-1_5/raw_2024-03-08T11-28-41.393728/results.json index acf1bcc592b6bd89d443020170dc3351c263dcca..77f79d5bce3c95ac56a9ed433cd45567f2d390d3 100644 --- a/microsoft/phi-1_5/raw_2024-03-08T11-28-41.393728/results.json +++ b/microsoft/phi-1_5/raw_2024-03-08T11-28-41.393728/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.3333333333333333, - "acc,all": 0.5, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.1301716664085566, - "mse,all": 1.0333455882352942, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.23504867872044508, - "acc,exam_id__UNICAMP_2022": 0.2564102564102564, - "acc,exam_id__UNICAMP_2024": 0.24444444444444444, - "acc,exam_id__USP_2024": 0.2926829268292683, - "acc,exam_id__UNICAMP_2021_2": 0.3137254901960784, - "acc,exam_id__USP_2021": 0.23076923076923078, - "acc,exam_id__UNICAMP_2020": 0.2727272727272727, - "acc,exam_id__USP_2019": 0.15, - "acc,exam_id__USP_2020": 0.23214285714285715, - "acc,exam_id__UNICAMP_2019": 0.28, - "acc,exam_id__UNICAMP_2021_1": 0.21739130434782608, - "acc,exam_id__UNICAMP_2023": 0.27906976744186046, - "acc,exam_id__USP_2018": 0.16666666666666666, - "acc,exam_id__USP_2022": 0.14285714285714285, - "acc,exam_id__USP_2023": 0.1590909090909091, - "acc,exam_id__UNICAMP_2018": 0.2777777777777778, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.21623512946116166, - "acc,exam_id__2011": 0.23076923076923078, - "acc,exam_id__2009": 0.14782608695652175, - "acc,exam_id__2012": 0.25, - "acc,exam_id__2016": 0.24793388429752067, - "acc,exam_id__2017": 0.2413793103448276, - 
"acc,exam_id__2022": 0.16541353383458646, - "acc,exam_id__2023": 0.2074074074074074, - "acc,exam_id__2013": 0.2222222222222222, - "acc,exam_id__2015": 0.2184873949579832, - "acc,exam_id__2010": 0.2222222222222222, - "acc,exam_id__2016_2": 0.23577235772357724, - "acc,exam_id__2014": 0.21100917431192662 - }, - "faquad_nli": { - "f1_macro,all": 0.4396551724137931, - "acc,all": 0.7846153846153846, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.222010481181515, - "acc,all": 0.4992857142857143 - }, - "oab_exams": { - "acc,all": 0.23917995444191345, - "acc,exam_id__2017-23": 0.275, - "acc,exam_id__2010-02": 0.2, - "acc,exam_id__2012-06": 0.275, - "acc,exam_id__2016-20a": 0.325, - "acc,exam_id__2018-25": 0.325, - "acc,exam_id__2011-05": 0.1875, - "acc,exam_id__2013-10": 0.25, - "acc,exam_id__2015-16": 0.2, - "acc,exam_id__2013-12": 0.15, - "acc,exam_id__2016-21": 0.225, - "acc,exam_id__2011-04": 0.2125, - "acc,exam_id__2010-01": 0.18823529411764706, - "acc,exam_id__2012-07": 0.15, - "acc,exam_id__2012-08": 0.2375, - "acc,exam_id__2014-13": 0.275, - "acc,exam_id__2013-11": 0.1875, - "acc,exam_id__2015-18": 0.2375, - "acc,exam_id__2016-19": 0.2692307692307692, - "acc,exam_id__2014-14": 0.2625, - "acc,exam_id__2012-09": 0.22077922077922077, - "acc,exam_id__2012-06a": 0.3, - "acc,exam_id__2017-22": 0.3, - "acc,exam_id__2014-15": 0.2564102564102564, - "acc,exam_id__2015-17": 0.28205128205128205, - "acc,exam_id__2017-24": 0.1875, - "acc,exam_id__2016-20": 0.2375, - "acc,exam_id__2011-03": 0.25252525252525254, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.412292817679558, - "acc,all": 0.7015276145710928 - }, - "tweetsentbr": { - "f1_macro,all": 0.3288095806256486, - "acc,all": 0.43233830845771143, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.3333333333333333, + "acc,all": 0.5, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.1301716664085566, + "mse,all": 1.0333455882352942, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.23504867872044508, + "acc,exam_id__UNICAMP_2022": 0.2564102564102564, + "acc,exam_id__UNICAMP_2024": 0.24444444444444444, + "acc,exam_id__USP_2024": 0.2926829268292683, + "acc,exam_id__UNICAMP_2021_2": 0.3137254901960784, + "acc,exam_id__USP_2021": 0.23076923076923078, + "acc,exam_id__UNICAMP_2020": 0.2727272727272727, + "acc,exam_id__USP_2019": 0.15, + "acc,exam_id__USP_2020": 0.23214285714285715, + "acc,exam_id__UNICAMP_2019": 0.28, + "acc,exam_id__UNICAMP_2021_1": 0.21739130434782608, + "acc,exam_id__UNICAMP_2023": 0.27906976744186046, + "acc,exam_id__USP_2018": 0.16666666666666666, + "acc,exam_id__USP_2022": 0.14285714285714285, + "acc,exam_id__USP_2023": 0.1590909090909091, + "acc,exam_id__UNICAMP_2018": 0.2777777777777778, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.21623512946116166, + "acc,exam_id__2011": 0.23076923076923078, + "acc,exam_id__2009": 0.14782608695652175, + "acc,exam_id__2012": 0.25, + "acc,exam_id__2016": 0.24793388429752067, + "acc,exam_id__2017": 0.2413793103448276, + "acc,exam_id__2022": 0.16541353383458646, + "acc,exam_id__2023": 0.2074074074074074, + "acc,exam_id__2013": 0.2222222222222222, + "acc,exam_id__2015": 0.2184873949579832, + "acc,exam_id__2010": 0.2222222222222222, + "acc,exam_id__2016_2": 0.23577235772357724, + "acc,exam_id__2014": 0.21100917431192662 + }, + "faquad_nli": { + "f1_macro,all": 0.4396551724137931, + "acc,all": 0.7846153846153846, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.33301572177227245, + "acc,all": 0.4992857142857143 + }, + "oab_exams": { + "acc,all": 0.23917995444191345, + "acc,exam_id__2017-23": 0.275, + "acc,exam_id__2010-02": 0.2, + "acc,exam_id__2012-06": 0.275, + "acc,exam_id__2016-20a": 0.325, + "acc,exam_id__2018-25": 0.325, + "acc,exam_id__2011-05": 0.1875, + "acc,exam_id__2013-10": 0.25, + "acc,exam_id__2015-16": 0.2, + "acc,exam_id__2013-12": 0.15, + "acc,exam_id__2016-21": 0.225, + "acc,exam_id__2011-04": 0.2125, + "acc,exam_id__2010-01": 0.18823529411764706, + "acc,exam_id__2012-07": 0.15, + "acc,exam_id__2012-08": 0.2375, + "acc,exam_id__2014-13": 0.275, + "acc,exam_id__2013-11": 0.1875, + "acc,exam_id__2015-18": 0.2375, + "acc,exam_id__2016-19": 0.2692307692307692, + "acc,exam_id__2014-14": 0.2625, + "acc,exam_id__2012-09": 0.22077922077922077, + "acc,exam_id__2012-06a": 0.3, + "acc,exam_id__2017-22": 0.3, + 
"acc,exam_id__2014-15": 0.2564102564102564, + "acc,exam_id__2015-17": 0.28205128205128205, + "acc,exam_id__2017-24": 0.1875, + "acc,exam_id__2016-20": 0.2375, + "acc,exam_id__2011-03": 0.25252525252525254, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.412292817679558, + "acc,all": 0.7015276145710928 + }, + "tweetsentbr": { + "f1_macro,all": 0.3288095806256486, + "acc,all": 0.43233830845771143, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 339, - "non_truncated": 13811, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 408, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"bffd3b29c4741576a3a97656bcb74956cffaeccf", - "model_dtype": "torch.float16", - "model_memory_footprint": 2842834432, - "model_num_parameters": 1418270720, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:1", - "batch_size": 16, - "max_length": 2048, - "max_ctx_length": 2016, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1493.483660130719, - "min_seq_length": 1468, - "max_seq_length": 1571, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1648.483660130719, - "min_seq_length": 1623, - "max_seq_length": 1726, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 183, - "non_truncated": 536, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 243, - "mean_seq_length": 1867.0458970792768, - "min_seq_length": 1452, - "max_seq_length": 2752, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.6620305980528514 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 154, - "non_truncated": 1275, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 163, - "mean_seq_length": 1796.664800559832, - "min_seq_length": 1506, - "max_seq_length": 2767, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.8859342197340796 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1751.283076923077, - "min_seq_length": 1689, - "max_seq_length": 1895, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1328.2042857142858, - "min_seq_length": 1303, - "max_seq_length": 1610, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 339, + "non_truncated": 13811, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 408, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "bffd3b29c4741576a3a97656bcb74956cffaeccf", + "model_dtype": "torch.float16", + "model_memory_footprint": 2842834432, + "model_num_parameters": 1418270720, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + 
"model_is_quantized": null, + "model_device": "cuda:1", + "batch_size": 16, + "max_length": 2048, + "max_ctx_length": 2016, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 2, - "non_truncated": 2193, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 2, - "mean_seq_length": 1554.2952164009112, - "min_seq_length": 1249, - "max_seq_length": 2129, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9990888382687926 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1493.483660130719, + "min_seq_length": 1468, + "max_seq_length": 1571, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1648.483660130719, + "min_seq_length": 1623, + "max_seq_length": 1726, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 183, + "non_truncated": 536, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 243, + "mean_seq_length": 1867.0458970792768, + "min_seq_length": 1452, + "max_seq_length": 2752, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.6620305980528514 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 154, + "non_truncated": 1275, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 163, + "mean_seq_length": 1796.664800559832, + "min_seq_length": 1506, + "max_seq_length": 2767, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.8859342197340796 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1751.283076923077, + "min_seq_length": 1689, + "max_seq_length": 1895, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1328.2042857142858, + "min_seq_length": 1303, + "max_seq_length": 1610, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 2, + "non_truncated": 2193, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 2, + "mean_seq_length": 1554.2952164009112, + "min_seq_length": 1249, + "max_seq_length": 2129, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9990888382687926 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1882.6063454759108, + "min_seq_length": 1846, + "max_seq_length": 1915, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + 
"mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1617.4860696517412, + "min_seq_length": 1596, + "max_seq_length": 1669, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1882.6063454759108, - "min_seq_length": 1846, - "max_seq_length": 1915, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=microsoft/phi-1_5,dtype=float16,device=cuda:1,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1617.4860696517412, - "min_seq_length": 1596, - "max_seq_length": 1669, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=microsoft/phi-1_5,dtype=float16,device=cuda:1,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": null + "git_hash": null } \ No newline at end of file diff --git a/microsoft/phi-1_5/results_2024-03-08T11-28-41.393728.json b/microsoft/phi-1_5/results_2024-03-08T11-28-41.393728.json index ac531128656892fbec7510c22ab163ea26684d2a..e23772d07957577647c5dc4853842a70790c79fc 100644 --- a/microsoft/phi-1_5/results_2024-03-08T11-28-41.393728.json +++ b/microsoft/phi-1_5/results_2024-03-08T11-28-41.393728.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.2840818682517694, - "all_grouped_npm": -0.09968724524802117, + "all_grouped_average": 0.2964157838729647, + "all_grouped_npm": -0.07501941400563063, "all_grouped": { "enem_challenge": 0.21623512946116166, "bluex": 0.23504867872044508, @@ -43,7 +43,7 @@ "assin2_rte": 0.3333333333333333, "assin2_sts": 0.1301716664085566, "faquad_nli": 0.4396551724137931, - "hatebr_offensive": 0.222010481181515, + "hatebr_offensive": 0.33301572177227245, "portuguese_hate_speech": 0.412292817679558, "tweetsentbr": 0.3288095806256486 }, @@ -54,7 +54,7 @@ "harness|assin2_rte|assin2_rte|None|15": 0.3333333333333333, "harness|assin2_sts|assin2_sts|None|15": 0.1301716664085566, "harness|faquad_nli|faquad_nli|None|15": 0.4396551724137931, - "harness|hatebr_offensive|hatebr_offensive|None|25": 0.222010481181515, + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.33301572177227245, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.412292817679558, "harness|tweetsentbr|tweetsentbr|None|25": 0.3288095806256486 }, @@ -140,9 +140,9 @@ "main_score": 0.4396551724137931 }, 
"harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.222010481181515, + "f1_macro,all": 0.33301572177227245, "acc,all": 0.4992857142857143, - "main_score": 0.222010481181515 + "main_score": 0.33301572177227245 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { "f1_macro,all": 0.412292817679558, diff --git a/mistralai/Mistral-7B-Instruct-v0.2/raw_2024-02-22T04-38-02.592138/results.json b/mistralai/Mistral-7B-Instruct-v0.2/raw_2024-02-22T04-38-02.592138/results.json index adc3344db0fcf3c563009adc81c092dceeb4ecfd..9e12fb615e43c3163a82bdbabbb3844c40a951e6 100644 --- a/mistralai/Mistral-7B-Instruct-v0.2/raw_2024-02-22T04-38-02.592138/results.json +++ b/mistralai/Mistral-7B-Instruct-v0.2/raw_2024-02-22T04-38-02.592138/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9051791381281589, - "acc,all": 0.9052287581699346, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.743171027388709, - "mse,all": 0.7835389477124184, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.521557719054242, - "acc,exam_id__USP_2019": 0.45, - "acc,exam_id__USP_2021": 0.5384615384615384, - "acc,exam_id__UNICAMP_2023": 0.46511627906976744, - "acc,exam_id__USP_2023": 0.6818181818181818, - "acc,exam_id__UNICAMP_2020": 0.5636363636363636, - "acc,exam_id__USP_2024": 0.6829268292682927, - "acc,exam_id__UNICAMP_2021_1": 0.5869565217391305, - "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, - "acc,exam_id__UNICAMP_2018": 0.37037037037037035, - "acc,exam_id__USP_2018": 0.4444444444444444, - "acc,exam_id__USP_2020": 0.44642857142857145, - "acc,exam_id__UNICAMP_2022": 0.5384615384615384, - "acc,exam_id__USP_2022": 0.5102040816326531, - "acc,exam_id__UNICAMP_2024": 0.4888888888888889, - "acc,exam_id__UNICAMP_2019": 0.58, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.5892232330300909, - "acc,exam_id__2016": 0.5950413223140496, - "acc,exam_id__2012": 0.5775862068965517, - "acc,exam_id__2013": 0.5925925925925926, - "acc,exam_id__2010": 0.5811965811965812, - "acc,exam_id__2017": 0.5517241379310345, - "acc,exam_id__2022": 0.5263157894736842, - "acc,exam_id__2009": 0.6173913043478261, - "acc,exam_id__2011": 0.6666666666666666, - "acc,exam_id__2023": 0.6148148148148148, - "acc,exam_id__2014": 0.6238532110091743, - "acc,exam_id__2015": 0.5546218487394958, - "acc,exam_id__2016_2": 0.5772357723577236 - }, - "faquad_nli": { - "f1_macro,all": 0.6610295761007225, - "acc,all": 0.6984615384615385, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8121063440638714, - "acc,all": 0.8157142857142857 - }, - "oab_exams": { - "acc,all": 0.39225512528473805, - "acc,exam_id__2011-03": 0.3939393939393939, - "acc,exam_id__2014-15": 0.38461538461538464, - "acc,exam_id__2011-05": 0.3625, - "acc,exam_id__2014-14": 0.4625, - "acc,exam_id__2018-25": 0.4375, - "acc,exam_id__2015-17": 0.6025641025641025, - "acc,exam_id__2015-18": 0.3375, - "acc,exam_id__2012-07": 0.3625, - "acc,exam_id__2014-13": 0.35, - "acc,exam_id__2017-24": 0.3375, - "acc,exam_id__2012-06a": 0.3625, - "acc,exam_id__2013-11": 0.3875, - "acc,exam_id__2015-16": 0.35, - "acc,exam_id__2017-22": 0.5375, - "acc,exam_id__2010-02": 0.38, - "acc,exam_id__2016-21": 0.3875, - "acc,exam_id__2011-04": 0.375, - "acc,exam_id__2016-19": 0.46153846153846156, - "acc,exam_id__2013-12": 0.45, - "acc,exam_id__2017-23": 0.3375, - "acc,exam_id__2013-10": 0.4, - "acc,exam_id__2012-09": 0.4025974025974026, - 
"acc,exam_id__2010-01": 0.3411764705882353, - "acc,exam_id__2012-06": 0.375, - "acc,exam_id__2016-20": 0.3375, - "acc,exam_id__2016-20a": 0.2875, - "acc,exam_id__2012-08": 0.4, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7026455906821963, - "acc,all": 0.7532314923619271 - }, - "tweetsentbr": { - "f1_macro,all": 0.5056730165368587, - "acc,all": 0.7014925373134329, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9051791381281589, + "acc,all": 0.9052287581699346, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.743171027388709, + "mse,all": 0.7835389477124184, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.521557719054242, + "acc,exam_id__USP_2019": 0.45, + "acc,exam_id__USP_2021": 0.5384615384615384, + "acc,exam_id__UNICAMP_2023": 0.46511627906976744, + "acc,exam_id__USP_2023": 0.6818181818181818, + "acc,exam_id__UNICAMP_2020": 0.5636363636363636, + "acc,exam_id__USP_2024": 0.6829268292682927, + "acc,exam_id__UNICAMP_2021_1": 0.5869565217391305, + "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, + "acc,exam_id__UNICAMP_2018": 0.37037037037037035, + "acc,exam_id__USP_2018": 0.4444444444444444, + "acc,exam_id__USP_2020": 0.44642857142857145, + "acc,exam_id__UNICAMP_2022": 0.5384615384615384, + "acc,exam_id__USP_2022": 0.5102040816326531, + "acc,exam_id__UNICAMP_2024": 0.4888888888888889, + "acc,exam_id__UNICAMP_2019": 0.58, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.5892232330300909, + "acc,exam_id__2016": 0.5950413223140496, + "acc,exam_id__2012": 0.5775862068965517, + "acc,exam_id__2013": 0.5925925925925926, + "acc,exam_id__2010": 0.5811965811965812, + "acc,exam_id__2017": 0.5517241379310345, + "acc,exam_id__2022": 0.5263157894736842, + "acc,exam_id__2009": 0.6173913043478261, + "acc,exam_id__2011": 0.6666666666666666, + "acc,exam_id__2023": 0.6148148148148148, + "acc,exam_id__2014": 0.6238532110091743, + "acc,exam_id__2015": 0.5546218487394958, + "acc,exam_id__2016_2": 0.5772357723577236 + }, + "faquad_nli": { + "f1_macro,all": 0.6610295761007225, + "acc,all": 0.6984615384615385, + "alias": "faquad_nli" + }, + "hatebr_offensive": { 
+ "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8121063440638714, + "acc,all": 0.8157142857142857 + }, + "oab_exams": { + "acc,all": 0.39225512528473805, + "acc,exam_id__2011-03": 0.3939393939393939, + "acc,exam_id__2014-15": 0.38461538461538464, + "acc,exam_id__2011-05": 0.3625, + "acc,exam_id__2014-14": 0.4625, + "acc,exam_id__2018-25": 0.4375, + "acc,exam_id__2015-17": 0.6025641025641025, + "acc,exam_id__2015-18": 0.3375, + "acc,exam_id__2012-07": 0.3625, + "acc,exam_id__2014-13": 0.35, + "acc,exam_id__2017-24": 0.3375, + "acc,exam_id__2012-06a": 0.3625, + "acc,exam_id__2013-11": 0.3875, + "acc,exam_id__2015-16": 0.35, + "acc,exam_id__2017-22": 0.5375, + "acc,exam_id__2010-02": 0.38, + "acc,exam_id__2016-21": 0.3875, + "acc,exam_id__2011-04": 0.375, + "acc,exam_id__2016-19": 0.46153846153846156, + "acc,exam_id__2013-12": 0.45, + "acc,exam_id__2017-23": 0.3375, + "acc,exam_id__2013-10": 0.4, + "acc,exam_id__2012-09": 0.4025974025974026, + "acc,exam_id__2010-01": 0.3411764705882353, + "acc,exam_id__2012-06": 0.375, + "acc,exam_id__2016-20": 0.3375, + "acc,exam_id__2016-20a": 0.2875, + "acc,exam_id__2012-08": 0.4, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7026455906821963, + "acc,all": 0.7532314923619271 + }, + "tweetsentbr": { + "f1_macro,all": 0.6742306887158116, + "acc,all": 0.7014925373134329, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"b70aa86578567ba3301b21c8a27bea4e8f6d6d61", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 4096, - "max_ctx_length": 4064, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1451.7455065359477, - "min_seq_length": 1428, - "max_seq_length": 1518, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1675.7455065359477, - "min_seq_length": 1652, - "max_seq_length": 1742, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1744.9262865090404, - "min_seq_length": 1368, - "max_seq_length": 2545, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1645.039188243527, - "min_seq_length": 1379, - "max_seq_length": 2643, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1691.9876923076922, - "min_seq_length": 1636, - "max_seq_length": 1812, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1462.3878571428572, - "min_seq_length": 1439, - "max_seq_length": 1713, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "b70aa86578567ba3301b21c8a27bea4e8f6d6d61", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + 
"model_device": "cuda:0", + "batch_size": 16, + "max_length": 4096, + "max_ctx_length": 4064, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1390.764464692483, - "min_seq_length": 1124, - "max_seq_length": 1893, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1451.7455065359477, + "min_seq_length": 1428, + "max_seq_length": 1518, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1675.7455065359477, + "min_seq_length": 1652, + "max_seq_length": 1742, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1744.9262865090404, + "min_seq_length": 1368, + "max_seq_length": 2545, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1645.039188243527, + "min_seq_length": 1379, + "max_seq_length": 2643, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1691.9876923076922, + "min_seq_length": 1636, + "max_seq_length": 1812, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1462.3878571428572, + "min_seq_length": 1439, + "max_seq_length": 1713, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1390.764464692483, + "min_seq_length": 1124, + "max_seq_length": 1893, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1963.3360752056403, + "min_seq_length": 1928, + "max_seq_length": 2002, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, 
+ "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1709.2492537313433, + "min_seq_length": 1688, + "max_seq_length": 1804, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1963.3360752056403, - "min_seq_length": 1928, - "max_seq_length": 2002, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=mistralai/Mistral-7B-Instruct-v0.2,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1709.2492537313433, - "min_seq_length": 1688, - "max_seq_length": 1804, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=mistralai/Mistral-7B-Instruct-v0.2,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "804df15" + "git_hash": "804df15" } \ No newline at end of file diff --git a/mistralai/Mistral-7B-Instruct-v0.2/results_2024-02-22T04-38-02.592138.json b/mistralai/Mistral-7B-Instruct-v0.2/results_2024-02-22T04-38-02.592138.json index 2cd2781a6e53dc7b1d195eb2332450da30912bf8..497d48f192bb6c1850af39e293eb6696b474306f 100644 --- a/mistralai/Mistral-7B-Instruct-v0.2/results_2024-02-22T04-38-02.592138.json +++ b/mistralai/Mistral-7B-Instruct-v0.2/results_2024-02-22T04-38-02.592138.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6480934189188431, - "all_grouped_npm": 0.47857208866539325, + "all_grouped_average": 0.6668220491609489, + "all_grouped_npm": 0.5064420741447173, "all_grouped": { "enem_challenge": 0.5892232330300909, "bluex": 0.521557719054242, @@ -45,7 +45,7 @@ "faquad_nli": 0.6610295761007225, "hatebr_offensive": 0.8121063440638714, "portuguese_hate_speech": 0.7026455906821963, - "tweetsentbr": 0.5056730165368587 + "tweetsentbr": 0.6742306887158116 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.5892232330300909, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.6610295761007225, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8121063440638714, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7026455906821963, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5056730165368587 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6742306887158116 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.5892232330300909, @@ -150,9 +150,9 @@ "main_score": 0.7026455906821963 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 
0.5056730165368587, + "f1_macro,all": 0.6742306887158116, "acc,all": 0.7014925373134329, - "main_score": 0.5056730165368587 + "main_score": 0.6742306887158116 } }, "config_tasks": { diff --git a/mistralai/Mistral-7B-Instruct-v0.3/raw_2024-05-23T00-44-16.674482/results.json b/mistralai/Mistral-7B-Instruct-v0.3/raw_2024-05-23T00-44-16.674482/results.json index 111f3040a2153d9fa6139d011991a678b024408a..3b5eaa8680a2f55189fada7cb7c47b63741e559c 100644 --- a/mistralai/Mistral-7B-Instruct-v0.3/raw_2024-05-23T00-44-16.674482/results.json +++ b/mistralai/Mistral-7B-Instruct-v0.3/raw_2024-05-23T00-44-16.674482/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9027027027027027, - "acc,all": 0.9027777777777778, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7942968956652824, - "mse,all": 0.4601633986928105, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5312934631432545, - "acc,exam_id__UNICAMP_2019": 0.58, - "acc,exam_id__UNICAMP_2023": 0.627906976744186, - "acc,exam_id__USP_2024": 0.7317073170731707, - "acc,exam_id__UNICAMP_2022": 0.6410256410256411, - "acc,exam_id__UNICAMP_2021_1": 0.5, - "acc,exam_id__UNICAMP_2018": 0.4074074074074074, - "acc,exam_id__USP_2023": 0.6363636363636364, - "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373, - "acc,exam_id__USP_2018": 0.4444444444444444, - "acc,exam_id__USP_2019": 0.4, - "acc,exam_id__UNICAMP_2020": 0.5272727272727272, - "acc,exam_id__USP_2021": 0.5192307692307693, - "acc,exam_id__UNICAMP_2024": 0.5555555555555556, - "acc,exam_id__USP_2020": 0.5, - "acc,exam_id__USP_2022": 0.42857142857142855, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6242127361791463, - "acc,exam_id__2016_2": 0.6016260162601627, - "acc,exam_id__2011": 0.7094017094017094, - "acc,exam_id__2015": 0.5546218487394958, - "acc,exam_id__2016": 0.6694214876033058, - "acc,exam_id__2017": 0.5258620689655172, - "acc,exam_id__2010": 0.5982905982905983, - "acc,exam_id__2012": 0.6293103448275862, - "acc,exam_id__2014": 0.6422018348623854, - "acc,exam_id__2013": 0.6759259259259259, - "acc,exam_id__2022": 0.6466165413533834, - "acc,exam_id__2023": 0.6444444444444445, - "acc,exam_id__2009": 0.591304347826087 - }, - "faquad_nli": { - "f1_macro,all": 0.6866859299725274, - "acc,all": 0.7184615384615385, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8343452944711321, - "acc,all": 0.8364285714285714 - }, - "oab_exams": { - "acc,all": 0.4359908883826879, - "acc,exam_id__2012-09": 0.4155844155844156, - "acc,exam_id__2010-01": 0.3411764705882353, - "acc,exam_id__2017-23": 0.425, - "acc,exam_id__2015-18": 0.425, - "acc,exam_id__2011-04": 0.35, - "acc,exam_id__2013-10": 0.375, - "acc,exam_id__2014-15": 0.5512820512820513, - "acc,exam_id__2016-19": 0.5, - "acc,exam_id__2016-20": 0.45, - "acc,exam_id__2017-22": 0.575, - "acc,exam_id__2011-03": 0.3333333333333333, - "acc,exam_id__2013-11": 0.45, - "acc,exam_id__2012-07": 0.4, - "acc,exam_id__2010-02": 0.43, - "acc,exam_id__2016-21": 0.35, - "acc,exam_id__2013-12": 0.55, - "acc,exam_id__2018-25": 0.425, - "acc,exam_id__2017-24": 0.3875, - "acc,exam_id__2012-08": 0.45, - "acc,exam_id__2011-05": 0.4625, - "acc,exam_id__2015-17": 0.5128205128205128, - "acc,exam_id__2016-20a": 0.375, - "acc,exam_id__2014-14": 0.525, - "acc,exam_id__2012-06": 0.4, - "acc,exam_id__2012-06a": 0.4875, - "acc,exam_id__2014-13": 0.425, - "acc,exam_id__2015-16": 0.4375, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { 
- "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6824631701748856, - "acc,all": 0.7121034077555817 - }, - "tweetsentbr": { - "f1_macro,all": 0.4747604946147119, - "acc,all": 0.6860696517412935, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9027027027027027, + "acc,all": 0.9027777777777778, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7942968956652824, + "mse,all": 0.4601633986928105, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5312934631432545, + "acc,exam_id__UNICAMP_2019": 0.58, + "acc,exam_id__UNICAMP_2023": 0.627906976744186, + "acc,exam_id__USP_2024": 0.7317073170731707, + "acc,exam_id__UNICAMP_2022": 0.6410256410256411, + "acc,exam_id__UNICAMP_2021_1": 0.5, + "acc,exam_id__UNICAMP_2018": 0.4074074074074074, + "acc,exam_id__USP_2023": 0.6363636363636364, + "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373, + "acc,exam_id__USP_2018": 0.4444444444444444, + "acc,exam_id__USP_2019": 0.4, + "acc,exam_id__UNICAMP_2020": 0.5272727272727272, + "acc,exam_id__USP_2021": 0.5192307692307693, + "acc,exam_id__UNICAMP_2024": 0.5555555555555556, + "acc,exam_id__USP_2020": 0.5, + "acc,exam_id__USP_2022": 0.42857142857142855, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6242127361791463, + "acc,exam_id__2016_2": 0.6016260162601627, + "acc,exam_id__2011": 0.7094017094017094, + "acc,exam_id__2015": 0.5546218487394958, + "acc,exam_id__2016": 0.6694214876033058, + "acc,exam_id__2017": 0.5258620689655172, + "acc,exam_id__2010": 0.5982905982905983, + "acc,exam_id__2012": 0.6293103448275862, + "acc,exam_id__2014": 0.6422018348623854, + "acc,exam_id__2013": 0.6759259259259259, + "acc,exam_id__2022": 0.6466165413533834, + "acc,exam_id__2023": 0.6444444444444445, + "acc,exam_id__2009": 0.591304347826087 + }, + "faquad_nli": { + "f1_macro,all": 0.6866859299725274, + "acc,all": 0.7184615384615385, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8343452944711321, + "acc,all": 0.8364285714285714 + }, + "oab_exams": { + "acc,all": 0.4359908883826879, + "acc,exam_id__2012-09": 0.4155844155844156, + "acc,exam_id__2010-01": 0.3411764705882353, + 
"acc,exam_id__2017-23": 0.425, + "acc,exam_id__2015-18": 0.425, + "acc,exam_id__2011-04": 0.35, + "acc,exam_id__2013-10": 0.375, + "acc,exam_id__2014-15": 0.5512820512820513, + "acc,exam_id__2016-19": 0.5, + "acc,exam_id__2016-20": 0.45, + "acc,exam_id__2017-22": 0.575, + "acc,exam_id__2011-03": 0.3333333333333333, + "acc,exam_id__2013-11": 0.45, + "acc,exam_id__2012-07": 0.4, + "acc,exam_id__2010-02": 0.43, + "acc,exam_id__2016-21": 0.35, + "acc,exam_id__2013-12": 0.55, + "acc,exam_id__2018-25": 0.425, + "acc,exam_id__2017-24": 0.3875, + "acc,exam_id__2012-08": 0.45, + "acc,exam_id__2011-05": 0.4625, + "acc,exam_id__2015-17": 0.5128205128205128, + "acc,exam_id__2016-20a": 0.375, + "acc,exam_id__2014-14": 0.525, + "acc,exam_id__2012-06": 0.4, + "acc,exam_id__2012-06a": 0.4875, + "acc,exam_id__2014-13": 0.425, + "acc,exam_id__2015-16": 0.4375, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6824631701748856, + "acc,all": 0.7121034077555817 + }, + "tweetsentbr": { + "f1_macro,all": 0.6330139928196159, + "acc,all": 0.6860696517412935, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 2, - "non_truncated": 14148, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 2, - "has_chat_template": true, - "chat_type": "user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "83e9aa141f2e28c82232fea5325f54edf17c43de", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 15032926208, - "model_num_parameters": 7248023552, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1403.7455065359477, - "min_seq_length": 1380, - "max_seq_length": 1470, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1642.7455065359477, - "min_seq_length": 1619, - "max_seq_length": 1709, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 1, - "non_truncated": 718, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 1, - "mean_seq_length": 1732.9262865090404, - "min_seq_length": 1356, - "max_seq_length": 2533, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998609179415855 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - 
"mean_seq_length": 1633.039188243527, - "min_seq_length": 1367, - "max_seq_length": 2631, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1643.9876923076922, - "min_seq_length": 1588, - "max_seq_length": 1764, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 2, + "non_truncated": 14148, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 2, + "has_chat_template": true, + "chat_type": "user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "83e9aa141f2e28c82232fea5325f54edf17c43de", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 15032926208, + "model_num_parameters": 7248023552, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1384.3878571428572, - "min_seq_length": 1361, - "max_seq_length": 1635, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1378.764464692483, - "min_seq_length": 1112, - "max_seq_length": 1881, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1403.7455065359477, + "min_seq_length": 1380, + "max_seq_length": 1470, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1642.7455065359477, + "min_seq_length": 1619, + "max_seq_length": 1709, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 1, + "non_truncated": 718, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 1, + "mean_seq_length": 1732.9262865090404, + "min_seq_length": 1356, + "max_seq_length": 2533, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998609179415855 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1633.039188243527, + "min_seq_length": 1367, + "max_seq_length": 2631, 
+ "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1643.9876923076922, + "min_seq_length": 1588, + "max_seq_length": 1764, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1384.3878571428572, + "min_seq_length": 1361, + "max_seq_length": 1635, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1378.764464692483, + "min_seq_length": 1112, + "max_seq_length": 1881, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1885.3360752056403, + "min_seq_length": 1850, + "max_seq_length": 1924, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1631.2492537313433, + "min_seq_length": 1610, + "max_seq_length": 1726, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1885.3360752056403, - "min_seq_length": 1850, - "max_seq_length": 1924, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=mistralai/Mistral-7B-Instruct-v0.3,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1631.2492537313433, - "min_seq_length": 1610, - "max_seq_length": 1726, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=mistralai/Mistral-7B-Instruct-v0.3,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - 
"bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/mistralai/Mistral-7B-Instruct-v0.3/results_2024-05-23T00-44-16.674482.json b/mistralai/Mistral-7B-Instruct-v0.3/results_2024-05-23T00-44-16.674482.json index 52bcfaba1fde4e3c20c8c617d238edd3f6a11ee0..3bf94aeadd76471058bfdcb60772d3ecdf862206 100644 --- a/mistralai/Mistral-7B-Instruct-v0.3/results_2024-05-23T00-44-16.674482.json +++ b/mistralai/Mistral-7B-Instruct-v0.3/results_2024-05-23T00-44-16.674482.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.662972397256259, - "all_grouped_npm": 0.4972041070748634, + "all_grouped_average": 0.6805561192790262, + "all_grouped_npm": 0.5233703600849337, "all_grouped": { "enem_challenge": 0.6242127361791463, "bluex": 0.5312934631432545, @@ -45,7 +45,7 @@ "faquad_nli": 0.6866859299725274, "hatebr_offensive": 0.8343452944711321, "portuguese_hate_speech": 0.6824631701748856, - "tweetsentbr": 0.4747604946147119 + "tweetsentbr": 0.6330139928196159 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6242127361791463, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.6866859299725274, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8343452944711321, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6824631701748856, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4747604946147119 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6330139928196159 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6242127361791463, @@ -150,9 +150,9 @@ "main_score": 0.6824631701748856 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4747604946147119, + "f1_macro,all": 0.6330139928196159, "acc,all": 0.6860696517412935, - "main_score": 0.4747604946147119 + "main_score": 0.6330139928196159 } }, "config_tasks": { diff --git a/mistralai/Mixtral-8x7B-Instruct-v0.1/raw_2024-02-18T18-26-33.687924/results.json b/mistralai/Mixtral-8x7B-Instruct-v0.1/raw_2024-02-18T18-26-33.687924/results.json index 7c353d77495d9a58c59f6fe1f00c99b932f57cb9..0db2be57fc22bf3440bc6bd775da555c7da24edc 100644 --- a/mistralai/Mixtral-8x7B-Instruct-v0.1/raw_2024-02-18T18-26-33.687924/results.json +++ b/mistralai/Mixtral-8x7B-Instruct-v0.1/raw_2024-02-18T18-26-33.687924/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.6176169915932025, - "acc,all": 0.926062091503268, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.8273826913007483, - "mse,all": 0.38455882352941173, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5980528511821975, - "acc,exam_id__UNICAMP_2020": 0.6181818181818182, - "acc,exam_id__USP_2021": 0.6153846153846154, - "acc,exam_id__UNICAMP_2023": 0.7441860465116279, - "acc,exam_id__USP_2020": 0.48214285714285715, - "acc,exam_id__UNICAMP_2024": 0.6222222222222222, - "acc,exam_id__USP_2024": 0.7804878048780488, - "acc,exam_id__UNICAMP_2019": 0.64, - "acc,exam_id__UNICAMP_2022": 0.6410256410256411, - "acc,exam_id__USP_2018": 0.5370370370370371, - "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, - "acc,exam_id__USP_2022": 0.5714285714285714, - "acc,exam_id__UNICAMP_2018": 0.5, - "acc,exam_id__USP_2023": 0.6818181818181818, - "acc,exam_id__USP_2019": 0.5, - "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.7130860741777467, - "acc,exam_id__2015": 0.7563025210084033, - "acc,exam_id__2016_2": 0.6747967479674797, - 
"acc,exam_id__2012": 0.6724137931034483, - "acc,exam_id__2022": 0.6691729323308271, - "acc,exam_id__2013": 0.7037037037037037, - "acc,exam_id__2014": 0.7431192660550459, - "acc,exam_id__2016": 0.6776859504132231, - "acc,exam_id__2009": 0.7478260869565218, - "acc,exam_id__2023": 0.7111111111111111, - "acc,exam_id__2010": 0.7435897435897436, - "acc,exam_id__2017": 0.6551724137931034, - "acc,exam_id__2011": 0.811965811965812 - }, - "faquad_nli": { - "f1_macro,all": 0.8009434691825845, - "acc,all": 0.8476923076923077, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7848550686238143, - "acc,all": 0.7928571428571428 - }, - "oab_exams": { - "acc,all": 0.49567198177676536, - "acc,exam_id__2017-22": 0.6, - "acc,exam_id__2015-18": 0.525, - "acc,exam_id__2016-20": 0.4625, - "acc,exam_id__2013-12": 0.525, - "acc,exam_id__2012-07": 0.4875, - "acc,exam_id__2018-25": 0.475, - "acc,exam_id__2014-13": 0.5, - "acc,exam_id__2012-06a": 0.55, - "acc,exam_id__2016-19": 0.5128205128205128, - "acc,exam_id__2015-16": 0.525, - "acc,exam_id__2011-03": 0.40404040404040403, - "acc,exam_id__2012-09": 0.45454545454545453, - "acc,exam_id__2012-08": 0.4125, - "acc,exam_id__2017-23": 0.4875, - "acc,exam_id__2014-15": 0.6282051282051282, - "acc,exam_id__2016-21": 0.475, - "acc,exam_id__2011-05": 0.45, - "acc,exam_id__2010-01": 0.38823529411764707, - "acc,exam_id__2010-02": 0.49, - "acc,exam_id__2012-06": 0.575, - "acc,exam_id__2014-14": 0.55, - "acc,exam_id__2013-10": 0.5125, - "acc,exam_id__2011-04": 0.4375, - "acc,exam_id__2013-11": 0.4375, - "acc,exam_id__2015-17": 0.6923076923076923, - "acc,exam_id__2016-20a": 0.4875, - "acc,exam_id__2017-24": 0.375, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7313245694217261, - "acc,all": 0.7532314923619271 - }, - "tweetsentbr": { - "f1_macro,all": 0.7046312157201152, - "acc,all": 0.7472636815920398, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9264254873898037, + "acc,all": 0.926062091503268, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.8273826913007483, + "mse,all": 0.38455882352941173, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5980528511821975, + "acc,exam_id__UNICAMP_2020": 0.6181818181818182, + "acc,exam_id__USP_2021": 0.6153846153846154, + "acc,exam_id__UNICAMP_2023": 0.7441860465116279, + "acc,exam_id__USP_2020": 0.48214285714285715, + "acc,exam_id__UNICAMP_2024": 0.6222222222222222, + "acc,exam_id__USP_2024": 0.7804878048780488, + "acc,exam_id__UNICAMP_2019": 0.64, + "acc,exam_id__UNICAMP_2022": 0.6410256410256411, + "acc,exam_id__USP_2018": 0.5370370370370371, + "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, + "acc,exam_id__USP_2022": 0.5714285714285714, + "acc,exam_id__UNICAMP_2018": 0.5, + "acc,exam_id__USP_2023": 0.6818181818181818, + "acc,exam_id__USP_2019": 0.5, + "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.7130860741777467, + "acc,exam_id__2015": 0.7563025210084033, + "acc,exam_id__2016_2": 0.6747967479674797, + "acc,exam_id__2012": 0.6724137931034483, + "acc,exam_id__2022": 0.6691729323308271, + "acc,exam_id__2013": 0.7037037037037037, + "acc,exam_id__2014": 0.7431192660550459, + "acc,exam_id__2016": 0.6776859504132231, + "acc,exam_id__2009": 0.7478260869565218, + "acc,exam_id__2023": 0.7111111111111111, + "acc,exam_id__2010": 0.7435897435897436, + "acc,exam_id__2017": 0.6551724137931034, + "acc,exam_id__2011": 0.811965811965812 + }, + "faquad_nli": { + "f1_macro,all": 0.8009434691825845, + "acc,all": 0.8476923076923077, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7848550686238143, + "acc,all": 0.7928571428571428 + }, + "oab_exams": { + "acc,all": 0.49567198177676536, + "acc,exam_id__2017-22": 0.6, + "acc,exam_id__2015-18": 0.525, + "acc,exam_id__2016-20": 0.4625, + "acc,exam_id__2013-12": 0.525, + "acc,exam_id__2012-07": 0.4875, + "acc,exam_id__2018-25": 0.475, + "acc,exam_id__2014-13": 0.5, + "acc,exam_id__2012-06a": 0.55, + "acc,exam_id__2016-19": 0.5128205128205128, + "acc,exam_id__2015-16": 0.525, + "acc,exam_id__2011-03": 0.40404040404040403, + "acc,exam_id__2012-09": 0.45454545454545453, + "acc,exam_id__2012-08": 0.4125, + "acc,exam_id__2017-23": 0.4875, + "acc,exam_id__2014-15": 0.6282051282051282, + "acc,exam_id__2016-21": 0.475, + "acc,exam_id__2011-05": 0.45, + "acc,exam_id__2010-01": 0.38823529411764707, + "acc,exam_id__2010-02": 0.49, + "acc,exam_id__2012-06": 0.575, + "acc,exam_id__2014-14": 0.55, + "acc,exam_id__2013-10": 
0.5125, + "acc,exam_id__2011-04": 0.4375, + "acc,exam_id__2013-11": 0.4375, + "acc,exam_id__2015-17": 0.6923076923076923, + "acc,exam_id__2016-20a": 0.4875, + "acc,exam_id__2017-24": 0.375, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7313245694217261, + "acc,all": 0.7532314923619271 + }, + "tweetsentbr": { + "f1_macro,all": 0.7046312157201152, + "acc,all": 0.7472636815920398, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "user_assistant", - "n_gpus": 2, - "accelerate_num_process": null, - "model_sha": 
"125c431e2ff41a156b9f9076f744d2f35dd6e67a", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 93942464512, - "model_num_parameters": 46702792704, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 4096, - "max_ctx_length": 4064, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1451.7455065359477, - "min_seq_length": 1428, - "max_seq_length": 1518, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1675.7455065359477, - "min_seq_length": 1652, - "max_seq_length": 1742, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1744.9262865090404, - "min_seq_length": 1368, - "max_seq_length": 2545, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1645.039188243527, - "min_seq_length": 1379, - "max_seq_length": 2643, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1691.9876923076922, - "min_seq_length": 1636, - "max_seq_length": 1812, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1462.3878571428572, - "min_seq_length": 1439, - "max_seq_length": 1713, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "user_assistant", + "n_gpus": 2, + "accelerate_num_process": null, + "model_sha": "125c431e2ff41a156b9f9076f744d2f35dd6e67a", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 93942464512, + "model_num_parameters": 46702792704, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + 
"model_device": "cuda:0", + "batch_size": 16, + "max_length": 4096, + "max_ctx_length": 4064, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1390.764464692483, - "min_seq_length": 1124, - "max_seq_length": 1893, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1451.7455065359477, + "min_seq_length": 1428, + "max_seq_length": 1518, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1675.7455065359477, + "min_seq_length": 1652, + "max_seq_length": 1742, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1744.9262865090404, + "min_seq_length": 1368, + "max_seq_length": 2545, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1645.039188243527, + "min_seq_length": 1379, + "max_seq_length": 2643, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1691.9876923076922, + "min_seq_length": 1636, + "max_seq_length": 1812, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1462.3878571428572, + "min_seq_length": 1439, + "max_seq_length": 1713, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1390.764464692483, + "min_seq_length": 1124, + "max_seq_length": 1893, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1963.3360752056403, + "min_seq_length": 1928, + "max_seq_length": 2002, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, 
+ "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1709.2492537313433, + "min_seq_length": 1688, + "max_seq_length": 1804, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1963.3360752056403, - "min_seq_length": 1928, - "max_seq_length": 2002, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=mistralai/Mixtral-8x7B-Instruct-v0.1,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1709.2492537313433, - "min_seq_length": 1688, - "max_seq_length": 1804, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=mistralai/Mixtral-8x7B-Instruct-v0.1,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "804df15" + "git_hash": "804df15" } \ No newline at end of file diff --git a/mistralai/Mixtral-8x7B-Instruct-v0.1/results_2024-02-18T18-26-33.687924.json b/mistralai/Mixtral-8x7B-Instruct-v0.1/results_2024-02-18T18-26-33.687924.json index 41b5f223f4d7b7d631c55936d3756fd1d5171407..d727c7e43455dae62fd4fb367fcaa8c96d4d0ef3 100644 --- a/mistralai/Mixtral-8x7B-Instruct-v0.1/results_2024-02-18T18-26-33.687924.json +++ b/mistralai/Mixtral-8x7B-Instruct-v0.1/results_2024-02-18T18-26-33.687924.json @@ -34,13 +34,13 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6970627681087668, - "all_grouped_npm": 0.5290515708117544, + "all_grouped_average": 0.731374823197278, + "all_grouped_npm": 0.5976756809887768, "all_grouped": { "enem_challenge": 0.7130860741777467, "bluex": 0.5980528511821975, "oab_exams": 0.49567198177676536, - "assin2_rte": 0.6176169915932025, + "assin2_rte": 0.9264254873898037, "assin2_sts": 0.8273826913007483, "faquad_nli": 0.8009434691825845, "hatebr_offensive": 0.7848550686238143, @@ -51,7 +51,7 @@ "harness|enem_challenge|enem_challenge|None|3": 0.7130860741777467, "harness|bluex|bluex|None|3": 0.5980528511821975, "harness|oab_exams|oab_exams|None|3": 0.49567198177676536, - "harness|assin2_rte|assin2_rte|None|15": 0.6176169915932025, + "harness|assin2_rte|assin2_rte|None|15": 0.9264254873898037, "harness|assin2_sts|assin2_sts|None|15": 0.8273826913007483, "harness|faquad_nli|faquad_nli|None|15": 0.8009434691825845, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7848550686238143, @@ -125,9 +125,9 @@ "main_score": 0.49567198177676536 }, "harness|assin2_rte|assin2_rte|None|15": { - 
"f1_macro,all": 0.6176169915932025, + "f1_macro,all": 0.9264254873898037, "acc,all": 0.926062091503268, - "main_score": 0.6176169915932025 + "main_score": 0.9264254873898037 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.8273826913007483, diff --git a/mlabonne/AlphaMonarch-7B/raw_2024-05-19T05-10-09.581920/results.json b/mlabonne/AlphaMonarch-7B/raw_2024-05-19T05-10-09.581920/results.json index d89d48978637b3a6fd6b85f302486a91af6d42df..2558f8915b08afc0dc4ceec045e82edb5d8153fa 100644 --- a/mlabonne/AlphaMonarch-7B/raw_2024-05-19T05-10-09.581920/results.json +++ b/mlabonne/AlphaMonarch-7B/raw_2024-05-19T05-10-09.581920/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.3847263153165095, - "acc,all": 0.6209150326797386, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.566431547018829, - "mse,all": 0.8070938709010175, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.4909596662030598, - "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, - "acc,exam_id__USP_2024": 0.5609756097560976, - "acc,exam_id__UNICAMP_2019": 0.52, - "acc,exam_id__UNICAMP_2023": 0.5813953488372093, - "acc,exam_id__USP_2018": 0.4444444444444444, - "acc,exam_id__USP_2023": 0.5454545454545454, - "acc,exam_id__UNICAMP_2018": 0.37037037037037035, - "acc,exam_id__USP_2021": 0.3076923076923077, - "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, - "acc,exam_id__UNICAMP_2024": 0.5111111111111111, - "acc,exam_id__USP_2019": 0.45, - "acc,exam_id__USP_2020": 0.44642857142857145, - "acc,exam_id__UNICAMP_2022": 0.48717948717948717, - "acc,exam_id__UNICAMP_2020": 0.5454545454545454, - "acc,exam_id__USP_2022": 0.5918367346938775, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.5472358292512246, - "acc,exam_id__2014": 0.5504587155963303, - "acc,exam_id__2017": 0.5948275862068966, - "acc,exam_id__2023": 0.5481481481481482, - "acc,exam_id__2012": 0.47413793103448276, - "acc,exam_id__2022": 0.5037593984962406, - "acc,exam_id__2011": 0.6239316239316239, - "acc,exam_id__2016_2": 0.5365853658536586, - "acc,exam_id__2009": 0.5826086956521739, - "acc,exam_id__2010": 0.5470085470085471, - "acc,exam_id__2016": 0.5371900826446281, - "acc,exam_id__2015": 0.4957983193277311, - "acc,exam_id__2013": 0.5833333333333334 - }, - "faquad_nli": { - "f1_macro,all": 0.5282390058211012, - "acc,all": 0.7738461538461539, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.6660653746489624, - "acc,all": 0.675 - }, - "oab_exams": { - "acc,all": 0.379498861047836, - "acc,exam_id__2011-05": 0.4625, - "acc,exam_id__2015-16": 0.35, - "acc,exam_id__2011-03": 0.3333333333333333, - "acc,exam_id__2012-09": 0.35064935064935066, - "acc,exam_id__2016-20": 0.3125, - "acc,exam_id__2014-15": 0.47435897435897434, - "acc,exam_id__2013-12": 0.45, - "acc,exam_id__2012-06": 0.45, - "acc,exam_id__2016-19": 0.41025641025641024, - "acc,exam_id__2016-21": 0.3375, - "acc,exam_id__2014-13": 0.325, - "acc,exam_id__2013-10": 0.35, - "acc,exam_id__2015-18": 0.3625, - "acc,exam_id__2018-25": 0.3875, - "acc,exam_id__2017-23": 0.3625, - "acc,exam_id__2017-22": 0.425, - "acc,exam_id__2017-24": 0.275, - "acc,exam_id__2010-01": 0.35294117647058826, - "acc,exam_id__2014-14": 0.4875, - "acc,exam_id__2012-06a": 0.3375, - "acc,exam_id__2012-08": 0.3375, - "acc,exam_id__2016-20a": 0.4125, - "acc,exam_id__2012-07": 0.325, - "acc,exam_id__2010-02": 0.37, - "acc,exam_id__2015-17": 0.48717948717948717, - "acc,exam_id__2011-04": 0.35, - 
"acc,exam_id__2013-11": 0.3875, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.5932753095004035, - "acc,all": 0.6486486486486487 - }, - "tweetsentbr": { - "f1_macro,all": 0.3576935353691515, - "acc,all": 0.5119402985074627, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.5770894729747643, + "acc,all": 0.6209150326797386, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.566431547018829, + "mse,all": 0.8070938709010175, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.4909596662030598, + "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, + "acc,exam_id__USP_2024": 0.5609756097560976, + "acc,exam_id__UNICAMP_2019": 0.52, + "acc,exam_id__UNICAMP_2023": 0.5813953488372093, + "acc,exam_id__USP_2018": 0.4444444444444444, + "acc,exam_id__USP_2023": 0.5454545454545454, + "acc,exam_id__UNICAMP_2018": 0.37037037037037035, + "acc,exam_id__USP_2021": 0.3076923076923077, + "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, + "acc,exam_id__UNICAMP_2024": 0.5111111111111111, + "acc,exam_id__USP_2019": 0.45, + "acc,exam_id__USP_2020": 0.44642857142857145, + "acc,exam_id__UNICAMP_2022": 0.48717948717948717, + "acc,exam_id__UNICAMP_2020": 0.5454545454545454, + "acc,exam_id__USP_2022": 0.5918367346938775, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.5472358292512246, + "acc,exam_id__2014": 0.5504587155963303, + "acc,exam_id__2017": 0.5948275862068966, + "acc,exam_id__2023": 0.5481481481481482, + "acc,exam_id__2012": 0.47413793103448276, + "acc,exam_id__2022": 0.5037593984962406, + "acc,exam_id__2011": 0.6239316239316239, + "acc,exam_id__2016_2": 0.5365853658536586, + "acc,exam_id__2009": 0.5826086956521739, + "acc,exam_id__2010": 0.5470085470085471, + "acc,exam_id__2016": 0.5371900826446281, + "acc,exam_id__2015": 0.4957983193277311, + "acc,exam_id__2013": 0.5833333333333334 + }, + "faquad_nli": { + "f1_macro,all": 0.5282390058211012, + "acc,all": 0.7738461538461539, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.6660653746489624, + "acc,all": 0.675 + }, + "oab_exams": { + "acc,all": 
0.379498861047836, + "acc,exam_id__2011-05": 0.4625, + "acc,exam_id__2015-16": 0.35, + "acc,exam_id__2011-03": 0.3333333333333333, + "acc,exam_id__2012-09": 0.35064935064935066, + "acc,exam_id__2016-20": 0.3125, + "acc,exam_id__2014-15": 0.47435897435897434, + "acc,exam_id__2013-12": 0.45, + "acc,exam_id__2012-06": 0.45, + "acc,exam_id__2016-19": 0.41025641025641024, + "acc,exam_id__2016-21": 0.3375, + "acc,exam_id__2014-13": 0.325, + "acc,exam_id__2013-10": 0.35, + "acc,exam_id__2015-18": 0.3625, + "acc,exam_id__2018-25": 0.3875, + "acc,exam_id__2017-23": 0.3625, + "acc,exam_id__2017-22": 0.425, + "acc,exam_id__2017-24": 0.275, + "acc,exam_id__2010-01": 0.35294117647058826, + "acc,exam_id__2014-14": 0.4875, + "acc,exam_id__2012-06a": 0.3375, + "acc,exam_id__2012-08": 0.3375, + "acc,exam_id__2016-20a": 0.4125, + "acc,exam_id__2012-07": 0.325, + "acc,exam_id__2010-02": 0.37, + "acc,exam_id__2015-17": 0.48717948717948717, + "acc,exam_id__2011-04": 0.35, + "acc,exam_id__2013-11": 0.3875, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.5932753095004035, + "acc,all": 0.6486486486486487 + }, + "tweetsentbr": { + "f1_macro,all": 0.4769247138255353, + "acc,all": 0.5119402985074627, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "3de065d84411d74e5b3590f67f52b0b71faf6161", - "model_dtype": "torch.float16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1531.7455065359477, - "min_seq_length": 1508, - "max_seq_length": 1598, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1740.7455065359477, - "min_seq_length": 1717, - "max_seq_length": 1807, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1761.9262865090404, - "min_seq_length": 1385, - "max_seq_length": 2562, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1662.039188243527, - "min_seq_length": 1396, - "max_seq_length": 2660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1756.9876923076922, - "min_seq_length": 1701, - "max_seq_length": 1877, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "3de065d84411d74e5b3590f67f52b0b71faf6161", + "model_dtype": "torch.float16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1567.3878571428572, - "min_seq_length": 1544, - "max_seq_length": 1818, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1407.764464692483, - "min_seq_length": 1141, - "max_seq_length": 1910, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1531.7455065359477, + "min_seq_length": 1508, + "max_seq_length": 1598, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1740.7455065359477, + "min_seq_length": 1717, + "max_seq_length": 1807, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1761.9262865090404, + "min_seq_length": 1385, + "max_seq_length": 2562, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1662.039188243527, + "min_seq_length": 1396, + "max_seq_length": 
2660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1756.9876923076922, + "min_seq_length": 1701, + "max_seq_length": 1877, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1567.3878571428572, + "min_seq_length": 1544, + "max_seq_length": 1818, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1407.764464692483, + "min_seq_length": 1141, + "max_seq_length": 1910, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2068.3360752056406, + "min_seq_length": 2033, + "max_seq_length": 2107, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1814.2492537313433, + "min_seq_length": 1793, + "max_seq_length": 1909, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2068.3360752056406, - "min_seq_length": 2033, - "max_seq_length": 2107, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=mlabonne/AlphaMonarch-7B,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1814.2492537313433, - "min_seq_length": 1793, - "max_seq_length": 1909, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=mlabonne/AlphaMonarch-7B,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - 
"gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/mlabonne/AlphaMonarch-7B/results_2024-05-19T05-10-09.581920.json b/mlabonne/AlphaMonarch-7B/results_2024-05-19T05-10-09.581920.json index 649703ab39344f2a58bf709e2f287b761aa74074..04e2db12033df6512f6d85541729d97d2c54355b 100644 --- a/mlabonne/AlphaMonarch-7B/results_2024-05-19T05-10-09.581920.json +++ b/mlabonne/AlphaMonarch-7B/results_2024-05-19T05-10-09.581920.json @@ -34,29 +34,29 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.5015694937974531, - "all_grouped_npm": 0.22380179211871987, + "all_grouped_average": 0.5361910866990796, + "all_grouped_npm": 0.28626331036426855, "all_grouped": { "enem_challenge": 0.5472358292512246, "bluex": 0.4909596662030598, "oab_exams": 0.379498861047836, - "assin2_rte": 0.3847263153165095, + "assin2_rte": 0.5770894729747643, "assin2_sts": 0.566431547018829, "faquad_nli": 0.5282390058211012, "hatebr_offensive": 0.6660653746489624, "portuguese_hate_speech": 0.5932753095004035, - "tweetsentbr": 0.3576935353691515 + "tweetsentbr": 0.4769247138255353 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.5472358292512246, "harness|bluex|bluex|None|3": 0.4909596662030598, "harness|oab_exams|oab_exams|None|3": 0.379498861047836, - "harness|assin2_rte|assin2_rte|None|15": 0.3847263153165095, + "harness|assin2_rte|assin2_rte|None|15": 0.5770894729747643, "harness|assin2_sts|assin2_sts|None|15": 0.566431547018829, "harness|faquad_nli|faquad_nli|None|15": 0.5282390058211012, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.6660653746489624, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.5932753095004035, - "harness|tweetsentbr|tweetsentbr|None|25": 0.3576935353691515 + "harness|tweetsentbr|tweetsentbr|None|25": 0.4769247138255353 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.5472358292512246, @@ -125,9 +125,9 @@ "main_score": 0.379498861047836 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.3847263153165095, + "f1_macro,all": 0.5770894729747643, "acc,all": 0.6209150326797386, - "main_score": 0.3847263153165095 + "main_score": 0.5770894729747643 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.566431547018829, @@ -150,9 +150,9 @@ "main_score": 0.5932753095004035 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.3576935353691515, + "f1_macro,all": 0.4769247138255353, "acc,all": 0.5119402985074627, - "main_score": 0.3576935353691515 + "main_score": 0.4769247138255353 } }, "config_tasks": { diff --git a/mlabonne/Beyonder-4x7B-v3/raw_2024-05-26T14-45-26.008679/results.json b/mlabonne/Beyonder-4x7B-v3/raw_2024-05-26T14-45-26.008679/results.json index 3d655e9ea040b4f908d769fbd5013c32628d250c..ccbf40cdc62ae61d1a26e98f0d5a0958252bf0c7 100644 --- a/mlabonne/Beyonder-4x7B-v3/raw_2024-05-26T14-45-26.008679/results.json +++ b/mlabonne/Beyonder-4x7B-v3/raw_2024-05-26T14-45-26.008679/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.4170397068540878, - "acc,all": 0.6519607843137255, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.5739332533595589, - "mse,all": 0.7757371323529412, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5201668984700973, - "acc,exam_id__UNICAMP_2019": 0.52, - "acc,exam_id__USP_2021": 0.46153846153846156, - "acc,exam_id__UNICAMP_2024": 0.4444444444444444, - "acc,exam_id__USP_2019": 0.5, - "acc,exam_id__UNICAMP_2022": 0.5641025641025641, - 
"acc,exam_id__USP_2018": 0.46296296296296297, - "acc,exam_id__UNICAMP_2020": 0.6181818181818182, - "acc,exam_id__USP_2022": 0.5510204081632653, - "acc,exam_id__USP_2023": 0.5681818181818182, - "acc,exam_id__UNICAMP_2023": 0.5813953488372093, - "acc,exam_id__USP_2024": 0.6829268292682927, - "acc,exam_id__UNICAMP_2018": 0.46296296296296297, - "acc,exam_id__UNICAMP_2021_1": 0.5, - "acc,exam_id__USP_2020": 0.4642857142857143, - "acc,exam_id__UNICAMP_2021_2": 0.47058823529411764, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.5787263820853744, - "acc,exam_id__2017": 0.6206896551724138, - "acc,exam_id__2013": 0.5925925925925926, - "acc,exam_id__2016": 0.5371900826446281, - "acc,exam_id__2010": 0.5641025641025641, - "acc,exam_id__2011": 0.6324786324786325, - "acc,exam_id__2009": 0.5739130434782609, - "acc,exam_id__2022": 0.5639097744360902, - "acc,exam_id__2015": 0.5714285714285714, - "acc,exam_id__2014": 0.5871559633027523, - "acc,exam_id__2023": 0.6148148148148148, - "acc,exam_id__2016_2": 0.5528455284552846, - "acc,exam_id__2012": 0.5344827586206896 - }, - "faquad_nli": { - "f1_macro,all": 0.5516489252623926, - "acc,all": 0.7723076923076924, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.6746769273926633, - "acc,all": 0.6828571428571428 - }, - "oab_exams": { - "acc,all": 0.3968109339407745, - "acc,exam_id__2012-06": 0.3875, - "acc,exam_id__2014-15": 0.47435897435897434, - "acc,exam_id__2012-06a": 0.375, - "acc,exam_id__2010-01": 0.3058823529411765, - "acc,exam_id__2013-10": 0.325, - "acc,exam_id__2011-03": 0.3838383838383838, - "acc,exam_id__2018-25": 0.4125, - "acc,exam_id__2017-23": 0.325, - "acc,exam_id__2013-11": 0.4, - "acc,exam_id__2014-13": 0.3625, - "acc,exam_id__2017-22": 0.475, - "acc,exam_id__2010-02": 0.42, - "acc,exam_id__2014-14": 0.4875, - "acc,exam_id__2012-09": 0.35064935064935066, - "acc,exam_id__2016-19": 0.48717948717948717, - "acc,exam_id__2012-07": 0.3875, - "acc,exam_id__2011-04": 0.4, - "acc,exam_id__2011-05": 0.475, - "acc,exam_id__2016-20a": 0.35, - "acc,exam_id__2013-12": 0.45, - "acc,exam_id__2017-24": 0.375, - "acc,exam_id__2015-18": 0.3875, - "acc,exam_id__2016-21": 0.3625, - "acc,exam_id__2015-17": 0.47435897435897434, - "acc,exam_id__2012-08": 0.375, - "acc,exam_id__2016-20": 0.3375, - "acc,exam_id__2015-16": 0.375, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.5726351351351351, - "acc,all": 0.6122209165687427 - }, - "tweetsentbr": { - "f1_macro,all": 0.526973031703347, - "acc,all": 0.5567164179104478, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.6255595602811317, + "acc,all": 0.6519607843137255, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.5739332533595589, + "mse,all": 0.7757371323529412, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5201668984700973, + "acc,exam_id__UNICAMP_2019": 0.52, + "acc,exam_id__USP_2021": 0.46153846153846156, + "acc,exam_id__UNICAMP_2024": 0.4444444444444444, + "acc,exam_id__USP_2019": 0.5, + "acc,exam_id__UNICAMP_2022": 0.5641025641025641, + "acc,exam_id__USP_2018": 0.46296296296296297, + "acc,exam_id__UNICAMP_2020": 0.6181818181818182, + "acc,exam_id__USP_2022": 0.5510204081632653, + "acc,exam_id__USP_2023": 0.5681818181818182, + "acc,exam_id__UNICAMP_2023": 0.5813953488372093, + "acc,exam_id__USP_2024": 0.6829268292682927, + "acc,exam_id__UNICAMP_2018": 0.46296296296296297, + "acc,exam_id__UNICAMP_2021_1": 0.5, + "acc,exam_id__USP_2020": 0.4642857142857143, + "acc,exam_id__UNICAMP_2021_2": 0.47058823529411764, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.5787263820853744, + "acc,exam_id__2017": 0.6206896551724138, + "acc,exam_id__2013": 0.5925925925925926, + "acc,exam_id__2016": 0.5371900826446281, + "acc,exam_id__2010": 0.5641025641025641, + "acc,exam_id__2011": 0.6324786324786325, + "acc,exam_id__2009": 0.5739130434782609, + "acc,exam_id__2022": 0.5639097744360902, + "acc,exam_id__2015": 0.5714285714285714, + "acc,exam_id__2014": 0.5871559633027523, + "acc,exam_id__2023": 0.6148148148148148, + "acc,exam_id__2016_2": 0.5528455284552846, + "acc,exam_id__2012": 0.5344827586206896 + }, + "faquad_nli": { + "f1_macro,all": 0.5516489252623926, + "acc,all": 0.7723076923076924, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.6746769273926633, + "acc,all": 0.6828571428571428 + }, + "oab_exams": { + "acc,all": 0.3968109339407745, + "acc,exam_id__2012-06": 0.3875, + "acc,exam_id__2014-15": 0.47435897435897434, + "acc,exam_id__2012-06a": 0.375, + "acc,exam_id__2010-01": 0.3058823529411765, + "acc,exam_id__2013-10": 0.325, + "acc,exam_id__2011-03": 0.3838383838383838, + "acc,exam_id__2018-25": 0.4125, + "acc,exam_id__2017-23": 0.325, + "acc,exam_id__2013-11": 0.4, + "acc,exam_id__2014-13": 0.3625, + "acc,exam_id__2017-22": 0.475, + "acc,exam_id__2010-02": 0.42, + "acc,exam_id__2014-14": 0.4875, + "acc,exam_id__2012-09": 0.35064935064935066, + "acc,exam_id__2016-19": 0.48717948717948717, + "acc,exam_id__2012-07": 0.3875, + "acc,exam_id__2011-04": 0.4, + "acc,exam_id__2011-05": 0.475, + "acc,exam_id__2016-20a": 0.35, + "acc,exam_id__2013-12": 0.45, + "acc,exam_id__2017-24": 0.375, + 
"acc,exam_id__2015-18": 0.3875, + "acc,exam_id__2016-21": 0.3625, + "acc,exam_id__2015-17": 0.47435897435897434, + "acc,exam_id__2012-08": 0.375, + "acc,exam_id__2016-20": 0.3375, + "acc,exam_id__2015-16": 0.375, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.5726351351351351, + "acc,all": 0.6122209165687427 + }, + "tweetsentbr": { + "f1_macro,all": 0.526973031703347, + "acc,all": 0.5567164179104478, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "8e923fa480f511ab54d79b44b0487768bdd3de4e", - "model_dtype": "torch.float16", - "model_memory_footprint": 48844259328, - "model_num_parameters": 24153690112, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1531.7455065359477, - "min_seq_length": 1508, - "max_seq_length": 1598, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1740.7455065359477, - "min_seq_length": 1717, - "max_seq_length": 1807, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1761.9262865090404, - "min_seq_length": 1385, - "max_seq_length": 2562, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1662.039188243527, - "min_seq_length": 1396, - "max_seq_length": 2660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1756.9876923076922, - "min_seq_length": 1701, - "max_seq_length": 1877, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "8e923fa480f511ab54d79b44b0487768bdd3de4e", + "model_dtype": "torch.float16", + "model_memory_footprint": 48844259328, + "model_num_parameters": 24153690112, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1567.3878571428572, - "min_seq_length": 1544, - "max_seq_length": 1818, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1407.764464692483, - "min_seq_length": 1141, - "max_seq_length": 1910, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1531.7455065359477, + "min_seq_length": 1508, + "max_seq_length": 1598, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1740.7455065359477, + "min_seq_length": 1717, + "max_seq_length": 1807, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1761.9262865090404, + "min_seq_length": 1385, + "max_seq_length": 2562, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1662.039188243527, + "min_seq_length": 1396, + "max_seq_length": 
2660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1756.9876923076922, + "min_seq_length": 1701, + "max_seq_length": 1877, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1567.3878571428572, + "min_seq_length": 1544, + "max_seq_length": 1818, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1407.764464692483, + "min_seq_length": 1141, + "max_seq_length": 1910, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2068.3360752056406, + "min_seq_length": 2033, + "max_seq_length": 2107, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1814.2492537313433, + "min_seq_length": 1793, + "max_seq_length": 1909, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2068.3360752056406, - "min_seq_length": 2033, - "max_seq_length": 2107, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=mlabonne/Beyonder-4x7B-v3,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1814.2492537313433, - "min_seq_length": 1793, - "max_seq_length": 1909, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=mlabonne/Beyonder-4x7B-v3,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, 
- "gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/mlabonne/Beyonder-4x7B-v3/results_2024-05-26T14-45-26.008679.json b/mlabonne/Beyonder-4x7B-v3/results_2024-05-26T14-45-26.008679.json index 7eb5c6329b0a57782efcc0c15f67b99d83afe96d..c87623e3f26383bb54ef0b670e566fda550c0b11 100644 --- a/mlabonne/Beyonder-4x7B-v3/results_2024-05-26T14-45-26.008679.json +++ b/mlabonne/Beyonder-4x7B-v3/results_2024-05-26T14-45-26.008679.json @@ -34,13 +34,13 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.5347345771337145, - "all_grouped_npm": 0.2732245472467345, + "all_grouped_average": 0.5579034497367193, + "all_grouped_npm": 0.3195622924527442, "all_grouped": { "enem_challenge": 0.5787263820853744, "bluex": 0.5201668984700973, "oab_exams": 0.3968109339407745, - "assin2_rte": 0.4170397068540878, + "assin2_rte": 0.6255595602811317, "assin2_sts": 0.5739332533595589, "faquad_nli": 0.5516489252623926, "hatebr_offensive": 0.6746769273926633, @@ -51,7 +51,7 @@ "harness|enem_challenge|enem_challenge|None|3": 0.5787263820853744, "harness|bluex|bluex|None|3": 0.5201668984700973, "harness|oab_exams|oab_exams|None|3": 0.3968109339407745, - "harness|assin2_rte|assin2_rte|None|15": 0.4170397068540878, + "harness|assin2_rte|assin2_rte|None|15": 0.6255595602811317, "harness|assin2_sts|assin2_sts|None|15": 0.5739332533595589, "harness|faquad_nli|faquad_nli|None|15": 0.5516489252623926, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.6746769273926633, @@ -125,9 +125,9 @@ "main_score": 0.3968109339407745 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.4170397068540878, + "f1_macro,all": 0.6255595602811317, "acc,all": 0.6519607843137255, - "main_score": 0.4170397068540878 + "main_score": 0.6255595602811317 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.5739332533595589, diff --git a/mlabonne/Llama-3-8B-Instruct-abliterated-dpomix/raw_2024-05-27T14-44-42.467656/results.json b/mlabonne/Llama-3-8B-Instruct-abliterated-dpomix/raw_2024-05-27T14-44-42.467656/results.json index 9dc77d6425260c9d5f59abd6876a3a54c187d270..795ee89c7563eaea6be9610e99c38168beeb7f13 100644 --- a/mlabonne/Llama-3-8B-Instruct-abliterated-dpomix/raw_2024-05-27T14-44-42.467656/results.json +++ b/mlabonne/Llama-3-8B-Instruct-abliterated-dpomix/raw_2024-05-27T14-44-42.467656/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9136602188080808, - "acc,all": 0.9138071895424836, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7676336804192703, - "mse,all": 0.6167238562091503, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5730180806675939, - "acc,exam_id__UNICAMP_2018": 0.5, - "acc,exam_id__UNICAMP_2023": 0.627906976744186, - "acc,exam_id__USP_2023": 0.6818181818181818, - "acc,exam_id__UNICAMP_2024": 0.6666666666666666, - "acc,exam_id__USP_2024": 0.6341463414634146, - "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, - "acc,exam_id__USP_2020": 0.5714285714285714, - "acc,exam_id__UNICAMP_2020": 0.5818181818181818, - "acc,exam_id__UNICAMP_2022": 0.5641025641025641, - "acc,exam_id__UNICAMP_2019": 0.54, - "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, - "acc,exam_id__USP_2018": 0.46296296296296297, - "acc,exam_id__USP_2021": 0.5961538461538461, - "acc,exam_id__USP_2019": 0.55, - "acc,exam_id__USP_2022": 0.5714285714285714, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.7053883834849545, - "acc,exam_id__2011": 0.717948717948718, - 
"acc,exam_id__2017": 0.6637931034482759, - "acc,exam_id__2015": 0.7310924369747899, - "acc,exam_id__2016": 0.7024793388429752, - "acc,exam_id__2016_2": 0.6585365853658537, - "acc,exam_id__2009": 0.7130434782608696, - "acc,exam_id__2012": 0.7241379310344828, - "acc,exam_id__2010": 0.7435897435897436, - "acc,exam_id__2013": 0.6944444444444444, - "acc,exam_id__2014": 0.6972477064220184, - "acc,exam_id__2022": 0.6616541353383458, - "acc,exam_id__2023": 0.7555555555555555 - }, - "faquad_nli": { - "f1_macro,all": 0.7143293906330614, - "acc,all": 0.7523076923076923, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8645687620632841, - "acc,all": 0.865 - }, - "oab_exams": { - "acc,all": 0.5097949886104783, - "acc,exam_id__2011-03": 0.42424242424242425, - "acc,exam_id__2014-13": 0.45, - "acc,exam_id__2013-10": 0.45, - "acc,exam_id__2017-24": 0.4375, - "acc,exam_id__2017-22": 0.6, - "acc,exam_id__2012-06a": 0.55, - "acc,exam_id__2016-20a": 0.425, - "acc,exam_id__2012-09": 0.45454545454545453, - "acc,exam_id__2015-16": 0.5, - "acc,exam_id__2011-04": 0.4625, - "acc,exam_id__2012-07": 0.5125, - "acc,exam_id__2014-14": 0.575, - "acc,exam_id__2014-15": 0.5641025641025641, - "acc,exam_id__2010-02": 0.57, - "acc,exam_id__2015-18": 0.525, - "acc,exam_id__2016-19": 0.5512820512820513, - "acc,exam_id__2012-06": 0.5875, - "acc,exam_id__2013-12": 0.5875, - "acc,exam_id__2011-05": 0.475, - "acc,exam_id__2017-23": 0.4875, - "acc,exam_id__2013-11": 0.5375, - "acc,exam_id__2016-20": 0.5375, - "acc,exam_id__2016-21": 0.375, - "acc,exam_id__2018-25": 0.525, - "acc,exam_id__2010-01": 0.43529411764705883, - "acc,exam_id__2012-08": 0.525, - "acc,exam_id__2015-17": 0.6538461538461539, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.628750468656138, - "acc,all": 0.6321974148061105 - }, - "tweetsentbr": { - "f1_macro,all": 0.4905941623046118, - "acc,all": 0.7174129353233831, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9136602188080808, + "acc,all": 0.9138071895424836, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7676336804192703, + "mse,all": 0.6167238562091503, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5730180806675939, + "acc,exam_id__UNICAMP_2018": 0.5, + "acc,exam_id__UNICAMP_2023": 0.627906976744186, + "acc,exam_id__USP_2023": 0.6818181818181818, + "acc,exam_id__UNICAMP_2024": 0.6666666666666666, + "acc,exam_id__USP_2024": 0.6341463414634146, + "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, + "acc,exam_id__USP_2020": 0.5714285714285714, + "acc,exam_id__UNICAMP_2020": 0.5818181818181818, + "acc,exam_id__UNICAMP_2022": 0.5641025641025641, + "acc,exam_id__UNICAMP_2019": 0.54, + "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, + "acc,exam_id__USP_2018": 0.46296296296296297, + "acc,exam_id__USP_2021": 0.5961538461538461, + "acc,exam_id__USP_2019": 0.55, + "acc,exam_id__USP_2022": 0.5714285714285714, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.7053883834849545, + "acc,exam_id__2011": 0.717948717948718, + "acc,exam_id__2017": 0.6637931034482759, + "acc,exam_id__2015": 0.7310924369747899, + "acc,exam_id__2016": 0.7024793388429752, + "acc,exam_id__2016_2": 0.6585365853658537, + "acc,exam_id__2009": 0.7130434782608696, + "acc,exam_id__2012": 0.7241379310344828, + "acc,exam_id__2010": 0.7435897435897436, + "acc,exam_id__2013": 0.6944444444444444, + "acc,exam_id__2014": 0.6972477064220184, + "acc,exam_id__2022": 0.6616541353383458, + "acc,exam_id__2023": 0.7555555555555555 + }, + "faquad_nli": { + "f1_macro,all": 0.7143293906330614, + "acc,all": 0.7523076923076923, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8645687620632841, + "acc,all": 0.865 + }, + "oab_exams": { + "acc,all": 0.5097949886104783, + "acc,exam_id__2011-03": 0.42424242424242425, + "acc,exam_id__2014-13": 0.45, + "acc,exam_id__2013-10": 0.45, + "acc,exam_id__2017-24": 0.4375, + "acc,exam_id__2017-22": 0.6, + "acc,exam_id__2012-06a": 0.55, + "acc,exam_id__2016-20a": 0.425, + "acc,exam_id__2012-09": 0.45454545454545453, + "acc,exam_id__2015-16": 0.5, + "acc,exam_id__2011-04": 0.4625, + "acc,exam_id__2012-07": 0.5125, + "acc,exam_id__2014-14": 0.575, + "acc,exam_id__2014-15": 0.5641025641025641, + "acc,exam_id__2010-02": 0.57, + "acc,exam_id__2015-18": 0.525, + "acc,exam_id__2016-19": 0.5512820512820513, + "acc,exam_id__2012-06": 0.5875, + "acc,exam_id__2013-12": 0.5875, + "acc,exam_id__2011-05": 0.475, + "acc,exam_id__2017-23": 0.4875, + "acc,exam_id__2013-11": 0.5375, + "acc,exam_id__2016-20": 0.5375, + 
"acc,exam_id__2016-21": 0.375, + "acc,exam_id__2018-25": 0.525, + "acc,exam_id__2010-01": 0.43529411764705883, + "acc,exam_id__2012-08": 0.525, + "acc,exam_id__2015-17": 0.6538461538461539, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.628750468656138, + "acc,all": 0.6321974148061105 + }, + "tweetsentbr": { + "f1_macro,all": 0.6541255497394823, + "acc,all": 0.7174129353233831, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "d0d40655b25f24afe504142cce427fc698f25239", - "model_dtype": "torch.float16", - "model_memory_footprint": 16060530688, - "model_num_parameters": 8030261248, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1318.5322712418301, - "min_seq_length": 1299, - "max_seq_length": 1382, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1509.5322712418301, - "min_seq_length": 1490, - "max_seq_length": 1573, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1484.769123783032, - "min_seq_length": 1165, - "max_seq_length": 2134, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1412.3547935619315, - "min_seq_length": 1187, - "max_seq_length": 2340, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1447.8215384615385, - "min_seq_length": 1402, - "max_seq_length": 1544, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "d0d40655b25f24afe504142cce427fc698f25239", + "model_dtype": "torch.float16", + "model_memory_footprint": 16060530688, + "model_num_parameters": 8030261248, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1279.3878571428572, - "min_seq_length": 1259, - "max_seq_length": 1498, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1220.3772209567198, - "min_seq_length": 988, - "max_seq_length": 1654, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1318.5322712418301, + "min_seq_length": 1299, + "max_seq_length": 1382, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1509.5322712418301, + "min_seq_length": 1490, + "max_seq_length": 1573, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1484.769123783032, + "min_seq_length": 1165, + "max_seq_length": 2134, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1412.3547935619315, + "min_seq_length": 1187, + "max_seq_length": 2340, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1447.8215384615385, + "min_seq_length": 1402, + "max_seq_length": 1544, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1279.3878571428572, + "min_seq_length": 1259, + "max_seq_length": 1498, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1220.3772209567198, + "min_seq_length": 988, + "max_seq_length": 1654, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1676.4195064629848, + "min_seq_length": 1646, + "max_seq_length": 1708, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1537.1537313432837, + "min_seq_length": 1520, + "max_seq_length": 1585, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1676.4195064629848, - "min_seq_length": 1646, - "max_seq_length": 1708, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=mlabonne/Llama-3-8B-Instruct-abliterated-dpomix,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1537.1537313432837, - "min_seq_length": 1520, - "max_seq_length": 1585, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=mlabonne/Llama-3-8B-Instruct-abliterated-dpomix,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - 
"gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/mlabonne/Llama-3-8B-Instruct-abliterated-dpomix/results_2024-05-27T14-44-42.467656.json b/mlabonne/Llama-3-8B-Instruct-abliterated-dpomix/results_2024-05-27T14-44-42.467656.json index ea5839a6fe2379ed4a8078e5cb5415a9503aca9c..3460333808ae67f442b46273d85917d475d605ef 100644 --- a/mlabonne/Llama-3-8B-Instruct-abliterated-dpomix/results_2024-05-27T14-44-42.467656.json +++ b/mlabonne/Llama-3-8B-Instruct-abliterated-dpomix/results_2024-05-27T14-44-42.467656.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6853042372941637, - "all_grouped_npm": 0.5283923058028247, + "all_grouped_average": 0.7034743914535936, + "all_grouped_npm": 0.555431225682929, "all_grouped": { "enem_challenge": 0.7053883834849545, "bluex": 0.5730180806675939, @@ -45,7 +45,7 @@ "faquad_nli": 0.7143293906330614, "hatebr_offensive": 0.8645687620632841, "portuguese_hate_speech": 0.628750468656138, - "tweetsentbr": 0.4905941623046118 + "tweetsentbr": 0.6541255497394823 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.7053883834849545, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7143293906330614, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8645687620632841, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.628750468656138, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4905941623046118 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6541255497394823 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.7053883834849545, @@ -150,9 +150,9 @@ "main_score": 0.628750468656138 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4905941623046118, + "f1_macro,all": 0.6541255497394823, "acc,all": 0.7174129353233831, - "main_score": 0.4905941623046118 + "main_score": 0.6541255497394823 } }, "config_tasks": { diff --git a/mlabonne/Monarch-7B/raw_2024-02-26T20-44-34.635482/results.json b/mlabonne/Monarch-7B/raw_2024-02-26T20-44-34.635482/results.json index b7076e18c616223b168c63fea398754290e75362..3c77903bf33061fdd67478aee124073080091f67 100644 --- a/mlabonne/Monarch-7B/raw_2024-02-26T20-44-34.635482/results.json +++ b/mlabonne/Monarch-7B/raw_2024-02-26T20-44-34.635482/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9240171227138955, - "acc,all": 0.9240196078431373, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7697959689241017, - "mse,all": 0.4341421977124183, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5257301808066759, - "acc,exam_id__USP_2024": 0.7560975609756098, - "acc,exam_id__USP_2020": 0.4642857142857143, - "acc,exam_id__USP_2018": 0.48148148148148145, - "acc,exam_id__UNICAMP_2018": 0.4444444444444444, - "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, - "acc,exam_id__USP_2023": 0.5681818181818182, - "acc,exam_id__UNICAMP_2022": 0.5128205128205128, - "acc,exam_id__UNICAMP_2019": 0.54, - "acc,exam_id__UNICAMP_2024": 0.5111111111111111, - "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, - "acc,exam_id__UNICAMP_2020": 0.5454545454545454, - "acc,exam_id__USP_2022": 0.5306122448979592, - "acc,exam_id__USP_2021": 0.4807692307692308, - "acc,exam_id__UNICAMP_2023": 0.5813953488372093, - "acc,exam_id__USP_2019": 0.45, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.622813156053184, - "acc,exam_id__2011": 0.6581196581196581, - "acc,exam_id__2016": 0.5702479338842975, - "acc,exam_id__2012": 
0.6206896551724138, - "acc,exam_id__2015": 0.6302521008403361, - "acc,exam_id__2010": 0.6837606837606838, - "acc,exam_id__2023": 0.6222222222222222, - "acc,exam_id__2017": 0.6379310344827587, - "acc,exam_id__2013": 0.6759259259259259, - "acc,exam_id__2014": 0.6238532110091743, - "acc,exam_id__2009": 0.6, - "acc,exam_id__2016_2": 0.5853658536585366, - "acc,exam_id__2022": 0.5789473684210527 - }, - "faquad_nli": { - "f1_macro,all": 0.7656618319975595, - "acc,all": 0.82, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8376055098130599, - "acc,all": 0.84 - }, - "oab_exams": { - "acc,all": 0.42004555808656036, - "acc,exam_id__2012-07": 0.375, - "acc,exam_id__2012-09": 0.37662337662337664, - "acc,exam_id__2015-16": 0.375, - "acc,exam_id__2011-03": 0.32323232323232326, - "acc,exam_id__2010-02": 0.43, - "acc,exam_id__2012-08": 0.4375, - "acc,exam_id__2012-06a": 0.3625, - "acc,exam_id__2014-15": 0.44871794871794873, - "acc,exam_id__2017-22": 0.5375, - "acc,exam_id__2016-19": 0.5, - "acc,exam_id__2010-01": 0.36470588235294116, - "acc,exam_id__2016-20a": 0.3, - "acc,exam_id__2014-14": 0.5125, - "acc,exam_id__2014-13": 0.325, - "acc,exam_id__2016-20": 0.3875, - "acc,exam_id__2015-17": 0.48717948717948717, - "acc,exam_id__2011-05": 0.4625, - "acc,exam_id__2013-11": 0.5125, - "acc,exam_id__2012-06": 0.4625, - "acc,exam_id__2013-12": 0.4125, - "acc,exam_id__2011-04": 0.45, - "acc,exam_id__2013-10": 0.4, - "acc,exam_id__2017-24": 0.3875, - "acc,exam_id__2016-21": 0.375, - "acc,exam_id__2017-23": 0.4625, - "acc,exam_id__2015-18": 0.4375, - "acc,exam_id__2018-25": 0.4625, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6809139129374466, - "acc,all": 0.7109283196239718 - }, - "tweetsentbr": { - "f1_macro,all": 0.4840153980116987, - "acc,all": 0.6990049751243781, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9240171227138955, + "acc,all": 0.9240196078431373, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7697959689241017, + "mse,all": 0.4341421977124183, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5257301808066759, + "acc,exam_id__USP_2024": 0.7560975609756098, + "acc,exam_id__USP_2020": 0.4642857142857143, + "acc,exam_id__USP_2018": 0.48148148148148145, + "acc,exam_id__UNICAMP_2018": 0.4444444444444444, + "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, + "acc,exam_id__USP_2023": 0.5681818181818182, + "acc,exam_id__UNICAMP_2022": 0.5128205128205128, + "acc,exam_id__UNICAMP_2019": 0.54, + "acc,exam_id__UNICAMP_2024": 0.5111111111111111, + "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, + "acc,exam_id__UNICAMP_2020": 0.5454545454545454, + "acc,exam_id__USP_2022": 0.5306122448979592, + "acc,exam_id__USP_2021": 0.4807692307692308, + "acc,exam_id__UNICAMP_2023": 0.5813953488372093, + "acc,exam_id__USP_2019": 0.45, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.622813156053184, + "acc,exam_id__2011": 0.6581196581196581, + "acc,exam_id__2016": 0.5702479338842975, + "acc,exam_id__2012": 0.6206896551724138, + "acc,exam_id__2015": 0.6302521008403361, + "acc,exam_id__2010": 0.6837606837606838, + "acc,exam_id__2023": 0.6222222222222222, + "acc,exam_id__2017": 0.6379310344827587, + "acc,exam_id__2013": 0.6759259259259259, + "acc,exam_id__2014": 0.6238532110091743, + "acc,exam_id__2009": 0.6, + "acc,exam_id__2016_2": 0.5853658536585366, + "acc,exam_id__2022": 0.5789473684210527 + }, + "faquad_nli": { + "f1_macro,all": 0.7656618319975595, + "acc,all": 0.82, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8376055098130599, + "acc,all": 0.84 + }, + "oab_exams": { + "acc,all": 0.42004555808656036, + "acc,exam_id__2012-07": 0.375, + "acc,exam_id__2012-09": 0.37662337662337664, + "acc,exam_id__2015-16": 0.375, + "acc,exam_id__2011-03": 0.32323232323232326, + "acc,exam_id__2010-02": 0.43, + "acc,exam_id__2012-08": 0.4375, + "acc,exam_id__2012-06a": 0.3625, + "acc,exam_id__2014-15": 0.44871794871794873, + "acc,exam_id__2017-22": 0.5375, + "acc,exam_id__2016-19": 0.5, + "acc,exam_id__2010-01": 0.36470588235294116, + "acc,exam_id__2016-20a": 0.3, + "acc,exam_id__2014-14": 0.5125, + "acc,exam_id__2014-13": 0.325, + "acc,exam_id__2016-20": 0.3875, + "acc,exam_id__2015-17": 0.48717948717948717, + "acc,exam_id__2011-05": 0.4625, + "acc,exam_id__2013-11": 0.5125, + "acc,exam_id__2012-06": 0.4625, + "acc,exam_id__2013-12": 0.4125, + "acc,exam_id__2011-04": 0.45, + "acc,exam_id__2013-10": 0.4, + 
"acc,exam_id__2017-24": 0.3875, + "acc,exam_id__2016-21": 0.375, + "acc,exam_id__2017-23": 0.4625, + "acc,exam_id__2015-18": 0.4375, + "acc,exam_id__2018-25": 0.4625, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6809139129374466, + "acc,all": 0.7109283196239718 + }, + "tweetsentbr": { + "f1_macro,all": 0.6453538640155982, + "acc,all": 0.6990049751243781, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "c7f7fb0a6a12f2abc4568bd9d48c320622a56e39", - 
"model_dtype": "torch.bfloat16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 4096, - "max_ctx_length": 4064, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1620.039188243527, - "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "c7f7fb0a6a12f2abc4568bd9d48c320622a56e39", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + 
"max_length": 4096, + "max_ctx_length": 4064, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + 
"fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=mlabonne/Monarch-7B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=mlabonne/Monarch-7B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "804df15" + "git_hash": "804df15" } \ No newline at end of file diff --git a/mlabonne/Monarch-7B/results_2024-02-26T20-44-34.635482.json b/mlabonne/Monarch-7B/results_2024-02-26T20-44-34.635482.json index d44de567afad859fa4d6811329e99e6158dea4a8..9baaf900d78534bc2e8f97f6a0cc5778d48f0cb1 100644 --- a/mlabonne/Monarch-7B/results_2024-02-26T20-44-34.635482.json +++ b/mlabonne/Monarch-7B/results_2024-02-26T20-44-34.635482.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6700665154826869, - "all_grouped_npm": 0.513919089898855, + "all_grouped_average": 0.6879930117053424, + "all_grouped_npm": 0.540595423563521, "all_grouped": { "enem_challenge": 0.622813156053184, "bluex": 0.5257301808066759, @@ -45,7 +45,7 @@ "faquad_nli": 0.7656618319975595, "hatebr_offensive": 0.8376055098130599, "portuguese_hate_speech": 0.6809139129374466, - "tweetsentbr": 0.4840153980116987 + "tweetsentbr": 0.6453538640155982 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.622813156053184, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7656618319975595, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8376055098130599, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6809139129374466, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4840153980116987 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6453538640155982 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.622813156053184, @@ -150,9 +150,9 @@ "main_score": 0.6809139129374466 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4840153980116987, + "f1_macro,all": 0.6453538640155982, "acc,all": 0.6990049751243781, - "main_score": 0.4840153980116987 + "main_score": 
0.6453538640155982 } }, "config_tasks": { diff --git a/mlabonne/NeuralMonarch-7B/raw_2024-05-21T02-25-10.601922/results.json b/mlabonne/NeuralMonarch-7B/raw_2024-05-21T02-25-10.601922/results.json index 90c045a717b10708364876df8804d12f10996663..083be7bf49af2259624a5054f94eaf7ee9970fe4 100644 --- a/mlabonne/NeuralMonarch-7B/raw_2024-05-21T02-25-10.601922/results.json +++ b/mlabonne/NeuralMonarch-7B/raw_2024-05-21T02-25-10.601922/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.38823462479376464, - "acc,all": 0.6237745098039216, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.5665781323229131, - "mse,all": 0.8022409461823565, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5006954102920723, - "acc,exam_id__USP_2023": 0.5681818181818182, - "acc,exam_id__USP_2020": 0.44642857142857145, - "acc,exam_id__UNICAMP_2023": 0.6046511627906976, - "acc,exam_id__USP_2018": 0.4444444444444444, - "acc,exam_id__UNICAMP_2022": 0.46153846153846156, - "acc,exam_id__USP_2024": 0.6097560975609756, - "acc,exam_id__UNICAMP_2024": 0.5333333333333333, - "acc,exam_id__USP_2019": 0.45, - "acc,exam_id__USP_2022": 0.5714285714285714, - "acc,exam_id__UNICAMP_2018": 0.4074074074074074, - "acc,exam_id__USP_2021": 0.34615384615384615, - "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, - "acc,exam_id__UNICAMP_2021_1": 0.5, - "acc,exam_id__UNICAMP_2020": 0.5454545454545454, - "acc,exam_id__UNICAMP_2019": 0.5, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.5472358292512246, - "acc,exam_id__2022": 0.49624060150375937, - "acc,exam_id__2012": 0.47413793103448276, - "acc,exam_id__2010": 0.5470085470085471, - "acc,exam_id__2013": 0.5925925925925926, - "acc,exam_id__2009": 0.5826086956521739, - "acc,exam_id__2023": 0.5555555555555556, - "acc,exam_id__2017": 0.5948275862068966, - "acc,exam_id__2016": 0.5371900826446281, - "acc,exam_id__2015": 0.48739495798319327, - "acc,exam_id__2014": 0.5504587155963303, - "acc,exam_id__2016_2": 0.5365853658536586, - "acc,exam_id__2011": 0.6239316239316239 - }, - "faquad_nli": { - "f1_macro,all": 0.5272169690774342, - "acc,all": 0.7723076923076924, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.6688152504087473, - "acc,all": 0.6771428571428572 - }, - "oab_exams": { - "acc,all": 0.3785876993166287, - "acc,exam_id__2012-07": 0.3375, - "acc,exam_id__2013-10": 0.35, - "acc,exam_id__2016-20a": 0.3875, - "acc,exam_id__2017-24": 0.2875, - "acc,exam_id__2011-05": 0.45, - "acc,exam_id__2014-13": 0.3125, - "acc,exam_id__2017-22": 0.425, - "acc,exam_id__2012-06": 0.4375, - "acc,exam_id__2017-23": 0.35, - "acc,exam_id__2016-19": 0.3974358974358974, - "acc,exam_id__2012-06a": 0.325, - "acc,exam_id__2014-14": 0.4625, - "acc,exam_id__2012-08": 0.35, - "acc,exam_id__2010-01": 0.3411764705882353, - "acc,exam_id__2015-16": 0.35, - "acc,exam_id__2012-09": 0.35064935064935066, - "acc,exam_id__2015-18": 0.375, - "acc,exam_id__2013-11": 0.4, - "acc,exam_id__2010-02": 0.38, - "acc,exam_id__2011-04": 0.4, - "acc,exam_id__2014-15": 0.47435897435897434, - "acc,exam_id__2018-25": 0.4, - "acc,exam_id__2015-17": 0.48717948717948717, - "acc,exam_id__2011-03": 0.3434343434343434, - "acc,exam_id__2016-21": 0.325, - "acc,exam_id__2013-12": 0.4125, - "acc,exam_id__2016-20": 0.325, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.5913725139864434, - "acc,all": 0.6462984723854289 - }, - 
"tweetsentbr": { - "f1_macro,all": 0.3583266795197657, - "acc,all": 0.5139303482587064, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.5823519371906469, + "acc,all": 0.6237745098039216, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.5665781323229131, + "mse,all": 0.8022409461823565, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5006954102920723, + "acc,exam_id__USP_2023": 0.5681818181818182, + "acc,exam_id__USP_2020": 0.44642857142857145, + "acc,exam_id__UNICAMP_2023": 0.6046511627906976, + "acc,exam_id__USP_2018": 0.4444444444444444, + "acc,exam_id__UNICAMP_2022": 0.46153846153846156, + "acc,exam_id__USP_2024": 0.6097560975609756, + "acc,exam_id__UNICAMP_2024": 0.5333333333333333, + "acc,exam_id__USP_2019": 0.45, + "acc,exam_id__USP_2022": 0.5714285714285714, + "acc,exam_id__UNICAMP_2018": 0.4074074074074074, + "acc,exam_id__USP_2021": 0.34615384615384615, + "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, + "acc,exam_id__UNICAMP_2021_1": 0.5, + "acc,exam_id__UNICAMP_2020": 0.5454545454545454, + "acc,exam_id__UNICAMP_2019": 0.5, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.5472358292512246, + "acc,exam_id__2022": 0.49624060150375937, + "acc,exam_id__2012": 0.47413793103448276, + "acc,exam_id__2010": 0.5470085470085471, + "acc,exam_id__2013": 0.5925925925925926, + "acc,exam_id__2009": 0.5826086956521739, + "acc,exam_id__2023": 0.5555555555555556, + "acc,exam_id__2017": 0.5948275862068966, + "acc,exam_id__2016": 0.5371900826446281, + "acc,exam_id__2015": 0.48739495798319327, + "acc,exam_id__2014": 0.5504587155963303, + "acc,exam_id__2016_2": 0.5365853658536586, + "acc,exam_id__2011": 0.6239316239316239 + }, + "faquad_nli": { + "f1_macro,all": 0.5272169690774342, + "acc,all": 0.7723076923076924, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.6688152504087473, + "acc,all": 0.6771428571428572 + }, + "oab_exams": { + "acc,all": 0.3785876993166287, + "acc,exam_id__2012-07": 0.3375, + "acc,exam_id__2013-10": 0.35, + "acc,exam_id__2016-20a": 0.3875, + "acc,exam_id__2017-24": 0.2875, + "acc,exam_id__2011-05": 0.45, + "acc,exam_id__2014-13": 0.3125, + 
"acc,exam_id__2017-22": 0.425, + "acc,exam_id__2012-06": 0.4375, + "acc,exam_id__2017-23": 0.35, + "acc,exam_id__2016-19": 0.3974358974358974, + "acc,exam_id__2012-06a": 0.325, + "acc,exam_id__2014-14": 0.4625, + "acc,exam_id__2012-08": 0.35, + "acc,exam_id__2010-01": 0.3411764705882353, + "acc,exam_id__2015-16": 0.35, + "acc,exam_id__2012-09": 0.35064935064935066, + "acc,exam_id__2015-18": 0.375, + "acc,exam_id__2013-11": 0.4, + "acc,exam_id__2010-02": 0.38, + "acc,exam_id__2011-04": 0.4, + "acc,exam_id__2014-15": 0.47435897435897434, + "acc,exam_id__2018-25": 0.4, + "acc,exam_id__2015-17": 0.48717948717948717, + "acc,exam_id__2011-03": 0.3434343434343434, + "acc,exam_id__2016-21": 0.325, + "acc,exam_id__2013-12": 0.4125, + "acc,exam_id__2016-20": 0.325, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.5913725139864434, + "acc,all": 0.6462984723854289 + }, + "tweetsentbr": { + "f1_macro,all": 0.4777689060263543, + "acc,all": 0.5139303482587064, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "bebae99e187a1ab3b009b2736a99a32cdc178c8f", - "model_dtype": "torch.float16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1531.7455065359477, - "min_seq_length": 1508, - "max_seq_length": 1598, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1740.7455065359477, - "min_seq_length": 1717, - "max_seq_length": 1807, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1761.9262865090404, - "min_seq_length": 1385, - "max_seq_length": 2562, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1662.039188243527, - "min_seq_length": 1396, - "max_seq_length": 2660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1756.9876923076922, - "min_seq_length": 1701, - "max_seq_length": 1877, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "bebae99e187a1ab3b009b2736a99a32cdc178c8f", + "model_dtype": "torch.float16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1567.3878571428572, - "min_seq_length": 1544, - "max_seq_length": 1818, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1407.764464692483, - "min_seq_length": 1141, - "max_seq_length": 1910, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1531.7455065359477, + "min_seq_length": 1508, + "max_seq_length": 1598, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1740.7455065359477, + "min_seq_length": 1717, + "max_seq_length": 1807, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1761.9262865090404, + "min_seq_length": 1385, + "max_seq_length": 2562, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1662.039188243527, + "min_seq_length": 1396, + "max_seq_length": 
2660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1756.9876923076922, + "min_seq_length": 1701, + "max_seq_length": 1877, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1567.3878571428572, + "min_seq_length": 1544, + "max_seq_length": 1818, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1407.764464692483, + "min_seq_length": 1141, + "max_seq_length": 1910, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2068.3360752056406, + "min_seq_length": 2033, + "max_seq_length": 2107, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1814.2492537313433, + "min_seq_length": 1793, + "max_seq_length": 1909, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2068.3360752056406, - "min_seq_length": 2033, - "max_seq_length": 2107, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=mlabonne/NeuralMonarch-7B,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1814.2492537313433, - "min_seq_length": 1793, - "max_seq_length": 1909, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=mlabonne/NeuralMonarch-7B,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, 
- "gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/mlabonne/NeuralMonarch-7B/results_2024-05-21T02-25-10.601922.json b/mlabonne/NeuralMonarch-7B/results_2024-05-21T02-25-10.601922.json index 1ff6c145d0115713a576cc4af60e32ad6f893ea8..cb235706a4f791dd52a716819c32ad9fa47d2c3a 100644 --- a/mlabonne/NeuralMonarch-7B/results_2024-05-21T02-25-10.601922.json +++ b/mlabonne/NeuralMonarch-7B/results_2024-05-21T02-25-10.601922.json @@ -34,29 +34,29 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.503007012107666, - "all_grouped_npm": 0.2259597432304489, + "all_grouped_average": 0.5378469608747184, + "all_grouped_npm": 0.28884596915108357, "all_grouped": { "enem_challenge": 0.5472358292512246, "bluex": 0.5006954102920723, "oab_exams": 0.3785876993166287, - "assin2_rte": 0.38823462479376464, + "assin2_rte": 0.5823519371906469, "assin2_sts": 0.5665781323229131, "faquad_nli": 0.5272169690774342, "hatebr_offensive": 0.6688152504087473, "portuguese_hate_speech": 0.5913725139864434, - "tweetsentbr": 0.3583266795197657 + "tweetsentbr": 0.4777689060263543 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.5472358292512246, "harness|bluex|bluex|None|3": 0.5006954102920723, "harness|oab_exams|oab_exams|None|3": 0.3785876993166287, - "harness|assin2_rte|assin2_rte|None|15": 0.38823462479376464, + "harness|assin2_rte|assin2_rte|None|15": 0.5823519371906469, "harness|assin2_sts|assin2_sts|None|15": 0.5665781323229131, "harness|faquad_nli|faquad_nli|None|15": 0.5272169690774342, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.6688152504087473, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.5913725139864434, - "harness|tweetsentbr|tweetsentbr|None|25": 0.3583266795197657 + "harness|tweetsentbr|tweetsentbr|None|25": 0.4777689060263543 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.5472358292512246, @@ -125,9 +125,9 @@ "main_score": 0.3785876993166287 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.38823462479376464, + "f1_macro,all": 0.5823519371906469, "acc,all": 0.6237745098039216, - "main_score": 0.38823462479376464 + "main_score": 0.5823519371906469 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.5665781323229131, @@ -150,9 +150,9 @@ "main_score": 0.5913725139864434 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.3583266795197657, + "f1_macro,all": 0.4777689060263543, "acc,all": 0.5139303482587064, - "main_score": 0.3583266795197657 + "main_score": 0.4777689060263543 } }, "config_tasks": { diff --git a/nicholasKluge/TeenyTinyLlama-160m/raw_2024-02-25T07-53-21.405861/results.json b/nicholasKluge/TeenyTinyLlama-160m/raw_2024-02-25T07-53-21.405861/results.json index fb1a211420f3c545ceb2b4d603af04041044906c..bb2696d20609b8a31370278ce1ff7509f1394d27 100644 --- a/nicholasKluge/TeenyTinyLlama-160m/raw_2024-02-25T07-53-21.405861/results.json +++ b/nicholasKluge/TeenyTinyLlama-160m/raw_2024-02-25T07-53-21.405861/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.539728635863353, - "acc,all": 0.5837418300653595, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.002356037361961063, - "mse,all": 2.7264460784313735, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.23087621696801114, - "acc,exam_id__USP_2019": 0.25, - "acc,exam_id__USP_2022": 0.22448979591836735, - "acc,exam_id__USP_2023": 0.29545454545454547, - "acc,exam_id__UNICAMP_2018": 0.2222222222222222, - 
"acc,exam_id__UNICAMP_2019": 0.26, - "acc,exam_id__USP_2020": 0.23214285714285715, - "acc,exam_id__UNICAMP_2020": 0.2909090909090909, - "acc,exam_id__UNICAMP_2023": 0.16279069767441862, - "acc,exam_id__USP_2021": 0.1346153846153846, - "acc,exam_id__UNICAMP_2022": 0.20512820512820512, - "acc,exam_id__UNICAMP_2024": 0.3111111111111111, - "acc,exam_id__USP_2018": 0.2037037037037037, - "acc,exam_id__UNICAMP_2021_2": 0.23529411764705882, - "acc,exam_id__UNICAMP_2021_1": 0.30434782608695654, - "acc,exam_id__USP_2024": 0.12195121951219512, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.19244226731980407, - "acc,exam_id__2013": 0.19444444444444445, - "acc,exam_id__2016_2": 0.1951219512195122, - "acc,exam_id__2016": 0.2066115702479339, - "acc,exam_id__2011": 0.23076923076923078, - "acc,exam_id__2017": 0.16379310344827586, - "acc,exam_id__2023": 0.13333333333333333, - "acc,exam_id__2014": 0.14678899082568808, - "acc,exam_id__2012": 0.19827586206896552, - "acc,exam_id__2009": 0.20869565217391303, - "acc,exam_id__2015": 0.23529411764705882, - "acc,exam_id__2022": 0.22556390977443608, - "acc,exam_id__2010": 0.17094017094017094 - }, - "faquad_nli": { - "f1_macro,all": 0.4396551724137931, - "acc,all": 0.7846153846153846, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.3692355648458845, - "acc,all": 0.5085714285714286 - }, - "oab_exams": { - "acc,all": 0.22369020501138953, - "acc,exam_id__2016-20a": 0.2125, - "acc,exam_id__2012-06": 0.2375, - "acc,exam_id__2015-18": 0.2375, - "acc,exam_id__2014-14": 0.2375, - "acc,exam_id__2012-07": 0.1625, - "acc,exam_id__2015-16": 0.225, - "acc,exam_id__2011-05": 0.225, - "acc,exam_id__2012-06a": 0.2125, - "acc,exam_id__2017-23": 0.2, - "acc,exam_id__2016-19": 0.24358974358974358, - "acc,exam_id__2017-24": 0.1625, - "acc,exam_id__2016-20": 0.225, - "acc,exam_id__2017-22": 0.2375, - "acc,exam_id__2013-12": 0.175, - "acc,exam_id__2010-02": 0.2, - "acc,exam_id__2011-03": 0.2828282828282828, - "acc,exam_id__2012-08": 0.2, - "acc,exam_id__2013-10": 0.1875, - "acc,exam_id__2016-21": 0.25, - "acc,exam_id__2014-15": 0.24358974358974358, - "acc,exam_id__2018-25": 0.275, - "acc,exam_id__2014-13": 0.275, - "acc,exam_id__2010-01": 0.24705882352941178, - "acc,exam_id__2015-17": 0.24358974358974358, - "acc,exam_id__2013-11": 0.15, - "acc,exam_id__2011-04": 0.2625, - "acc,exam_id__2012-09": 0.22077922077922077, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.42625990224169896, - "acc,all": 0.6615746180963572 - }, - "tweetsentbr": { - "f1_macro,all": 0.11387655922153524, - "acc,all": 0.29203980099502486, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.539728635863353, + "acc,all": 0.5837418300653595, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.002356037361961063, + "mse,all": 2.7264460784313735, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.23087621696801114, + "acc,exam_id__USP_2019": 0.25, + "acc,exam_id__USP_2022": 0.22448979591836735, + "acc,exam_id__USP_2023": 0.29545454545454547, + "acc,exam_id__UNICAMP_2018": 0.2222222222222222, + "acc,exam_id__UNICAMP_2019": 0.26, + "acc,exam_id__USP_2020": 0.23214285714285715, + "acc,exam_id__UNICAMP_2020": 0.2909090909090909, + "acc,exam_id__UNICAMP_2023": 0.16279069767441862, + "acc,exam_id__USP_2021": 0.1346153846153846, + "acc,exam_id__UNICAMP_2022": 0.20512820512820512, + "acc,exam_id__UNICAMP_2024": 0.3111111111111111, + "acc,exam_id__USP_2018": 0.2037037037037037, + "acc,exam_id__UNICAMP_2021_2": 0.23529411764705882, + "acc,exam_id__UNICAMP_2021_1": 0.30434782608695654, + "acc,exam_id__USP_2024": 0.12195121951219512, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.19244226731980407, + "acc,exam_id__2013": 0.19444444444444445, + "acc,exam_id__2016_2": 0.1951219512195122, + "acc,exam_id__2016": 0.2066115702479339, + "acc,exam_id__2011": 0.23076923076923078, + "acc,exam_id__2017": 0.16379310344827586, + "acc,exam_id__2023": 0.13333333333333333, + "acc,exam_id__2014": 0.14678899082568808, + "acc,exam_id__2012": 0.19827586206896552, + "acc,exam_id__2009": 0.20869565217391303, + "acc,exam_id__2015": 0.23529411764705882, + "acc,exam_id__2022": 0.22556390977443608, + "acc,exam_id__2010": 0.17094017094017094 + }, + "faquad_nli": { + "f1_macro,all": 0.4396551724137931, + "acc,all": 0.7846153846153846, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.3692355648458845, + "acc,all": 0.5085714285714286 + }, + "oab_exams": { + "acc,all": 0.22369020501138953, + "acc,exam_id__2016-20a": 0.2125, + "acc,exam_id__2012-06": 0.2375, + "acc,exam_id__2015-18": 0.2375, + "acc,exam_id__2014-14": 0.2375, + "acc,exam_id__2012-07": 0.1625, + "acc,exam_id__2015-16": 0.225, + "acc,exam_id__2011-05": 0.225, + "acc,exam_id__2012-06a": 0.2125, + "acc,exam_id__2017-23": 0.2, + "acc,exam_id__2016-19": 0.24358974358974358, + "acc,exam_id__2017-24": 0.1625, + "acc,exam_id__2016-20": 0.225, + "acc,exam_id__2017-22": 0.2375, + "acc,exam_id__2013-12": 0.175, + "acc,exam_id__2010-02": 0.2, + "acc,exam_id__2011-03": 0.2828282828282828, + "acc,exam_id__2012-08": 0.2, + "acc,exam_id__2013-10": 0.1875, + "acc,exam_id__2016-21": 0.25, + "acc,exam_id__2014-15": 0.24358974358974358, + "acc,exam_id__2018-25": 0.275, + 
"acc,exam_id__2014-13": 0.275, + "acc,exam_id__2010-01": 0.24705882352941178, + "acc,exam_id__2015-17": 0.24358974358974358, + "acc,exam_id__2013-11": 0.15, + "acc,exam_id__2011-04": 0.2625, + "acc,exam_id__2012-09": 0.22077922077922077, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.42625990224169896, + "acc,all": 0.6615746180963572 + }, + "tweetsentbr": { + "f1_macro,all": 0.1518354122953803, + "acc,all": 0.29203980099502486, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 2, - "non_truncated": 14148, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "5a878fdaf9f180cdeeb0f716ffd7c1c562635451", - 
"model_dtype": "torch.bfloat16", - "model_memory_footprint": 358390784, - "model_num_parameters": 162417408, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 64, - "max_length": 2048, - "max_ctx_length": 2016, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 925.4232026143791, - "min_seq_length": 910, - "max_seq_length": 964, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 966.4232026143791, - "min_seq_length": 951, - "max_seq_length": 1005, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1171.817802503477, - "min_seq_length": 905, - "max_seq_length": 1802, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 4, - "mean_seq_length": 1008.4177746675997, - "min_seq_length": 830, - "max_seq_length": 2485, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972008397480754 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 969.1338461538462, - "min_seq_length": 937, - "max_seq_length": 1035, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 868.4407142857143, - "min_seq_length": 853, - "max_seq_length": 1062, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 2, + "non_truncated": 14148, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "5a878fdaf9f180cdeeb0f716ffd7c1c562635451", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 358390784, + "model_num_parameters": 162417408, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 64, + 
"max_length": 2048, + "max_ctx_length": 2016, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 833.024145785877, - "min_seq_length": 660, - "max_seq_length": 1109, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 925.4232026143791, + "min_seq_length": 910, + "max_seq_length": 964, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 966.4232026143791, + "min_seq_length": 951, + "max_seq_length": 1005, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1171.817802503477, + "min_seq_length": 905, + "max_seq_length": 1802, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 4, + "mean_seq_length": 1008.4177746675997, + "min_seq_length": 830, + "max_seq_length": 2485, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972008397480754 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 969.1338461538462, + "min_seq_length": 937, + "max_seq_length": 1035, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 868.4407142857143, + "min_seq_length": 853, + "max_seq_length": 1062, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 833.024145785877, + "min_seq_length": 660, + "max_seq_length": 1109, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1220.021151586369, + "min_seq_length": 1193, + "max_seq_length": 1256, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + 
"fewshots_truncated": 0, + "mean_seq_length": 1155.4194029850746, + "min_seq_length": 1138, + "max_seq_length": 1212, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1220.021151586369, - "min_seq_length": 1193, - "max_seq_length": 1256, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=nicholasKluge/TeenyTinyLlama-160m,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1155.4194029850746, - "min_seq_length": 1138, - "max_seq_length": 1212, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=nicholasKluge/TeenyTinyLlama-160m,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "804df15" + "git_hash": "804df15" } \ No newline at end of file diff --git a/nicholasKluge/TeenyTinyLlama-160m/results_2024-02-25T07-53-21.405861.json b/nicholasKluge/TeenyTinyLlama-160m/results_2024-02-25T07-53-21.405861.json index 21651c349be786a78a9efc16ec6b019c94315919..7976ba891153f199515e9e71dbfdc8308ac6b212 100644 --- a/nicholasKluge/TeenyTinyLlama-160m/results_2024-02-25T07-53-21.405861.json +++ b/nicholasKluge/TeenyTinyLlama-160m/results_2024-02-25T07-53-21.405861.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.28201339569415895, - "all_grouped_npm": -0.07406339381740137, + "all_grouped_average": 0.28623104603569727, + "all_grouped_npm": -0.06778712842820742, "all_grouped": { "enem_challenge": 0.19244226731980407, "bluex": 0.23087621696801114, @@ -45,7 +45,7 @@ "faquad_nli": 0.4396551724137931, "hatebr_offensive": 0.3692355648458845, "portuguese_hate_speech": 0.42625990224169896, - "tweetsentbr": 0.11387655922153524 + "tweetsentbr": 0.1518354122953803 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.19244226731980407, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.4396551724137931, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.3692355648458845, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.42625990224169896, - "harness|tweetsentbr|tweetsentbr|None|25": 0.11387655922153524 + "harness|tweetsentbr|tweetsentbr|None|25": 0.1518354122953803 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.19244226731980407, @@ -150,9 +150,9 @@ "main_score": 0.42625990224169896 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.11387655922153524, + 
"f1_macro,all": 0.1518354122953803, "acc,all": 0.29203980099502486, - "main_score": 0.11387655922153524 + "main_score": 0.1518354122953803 } }, "config_tasks": { diff --git a/openai-community/openai-gpt/raw_2024-04-16T11-44-49.742462/results.json b/openai-community/openai-gpt/raw_2024-04-16T11-44-49.742462/results.json index 0d538e10ff9c4c9849b809fdb56a4c23a9de6e55..0da2c9925bc64d5906f385f4b64a2142d05a5140 100644 --- a/openai-community/openai-gpt/raw_2024-04-16T11-44-49.742462/results.json +++ b/openai-community/openai-gpt/raw_2024-04-16T11-44-49.742462/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.002171552660152009, - "acc,all": 0.0016339869281045752, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.042727458076029255, - "mse,all": 3.185171568627451, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.0, - "acc,exam_id__UNICAMP_2021_2": 0.0, - "acc,exam_id__USP_2022": 0.0, - "acc,exam_id__USP_2023": 0.0, - "acc,exam_id__UNICAMP_2022": 0.0, - "acc,exam_id__UNICAMP_2023": 0.0, - "acc,exam_id__USP_2020": 0.0, - "acc,exam_id__USP_2024": 0.0, - "acc,exam_id__UNICAMP_2021_1": 0.0, - "acc,exam_id__UNICAMP_2024": 0.0, - "acc,exam_id__UNICAMP_2020": 0.0, - "acc,exam_id__USP_2019": 0.0, - "acc,exam_id__USP_2018": 0.0, - "acc,exam_id__UNICAMP_2019": 0.0, - "acc,exam_id__USP_2021": 0.0, - "acc,exam_id__UNICAMP_2018": 0.0, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.0, - "acc,exam_id__2013": 0.0, - "acc,exam_id__2022": 0.0, - "acc,exam_id__2023": 0.0, - "acc,exam_id__2016_2": 0.0, - "acc,exam_id__2010": 0.0, - "acc,exam_id__2014": 0.0, - "acc,exam_id__2009": 0.0, - "acc,exam_id__2015": 0.0, - "acc,exam_id__2016": 0.0, - "acc,exam_id__2012": 0.0, - "acc,exam_id__2011": 0.0, - "acc,exam_id__2017": 0.0 - }, - "faquad_nli": { - "f1_macro,all": 0.0, - "acc,all": 0.0, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.0009510223490252021, - "acc,all": 0.0007142857142857143 - }, - "oab_exams": { - "acc,all": 0.0, - "acc,exam_id__2014-15": 0.0, - "acc,exam_id__2017-24": 0.0, - "acc,exam_id__2012-06a": 0.0, - "acc,exam_id__2011-03": 0.0, - "acc,exam_id__2016-20a": 0.0, - "acc,exam_id__2012-06": 0.0, - "acc,exam_id__2010-02": 0.0, - "acc,exam_id__2015-16": 0.0, - "acc,exam_id__2010-01": 0.0, - "acc,exam_id__2015-18": 0.0, - "acc,exam_id__2016-19": 0.0, - "acc,exam_id__2012-08": 0.0, - "acc,exam_id__2017-22": 0.0, - "acc,exam_id__2018-25": 0.0, - "acc,exam_id__2012-09": 0.0, - "acc,exam_id__2013-10": 0.0, - "acc,exam_id__2011-04": 0.0, - "acc,exam_id__2015-17": 0.0, - "acc,exam_id__2011-05": 0.0, - "acc,exam_id__2016-21": 0.0, - "acc,exam_id__2016-20": 0.0, - "acc,exam_id__2013-11": 0.0, - "acc,exam_id__2012-07": 0.0, - "acc,exam_id__2014-13": 0.0, - "acc,exam_id__2017-23": 0.0, - "acc,exam_id__2013-12": 0.0, - "acc,exam_id__2014-14": 0.0, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.0022259321090706734, - "acc,all": 0.0023501762632197414 - }, - "tweetsentbr": { - "f1_macro,all": 0.09449339207048459, - "acc,all": 0.21343283582089553, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? 
Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.003257328990228013, + "acc,all": 0.0016339869281045752, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.042727458076029255, + "mse,all": 3.185171568627451, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.0, + "acc,exam_id__UNICAMP_2021_2": 0.0, + "acc,exam_id__USP_2022": 0.0, + "acc,exam_id__USP_2023": 0.0, + "acc,exam_id__UNICAMP_2022": 0.0, + "acc,exam_id__UNICAMP_2023": 0.0, + "acc,exam_id__USP_2020": 0.0, + "acc,exam_id__USP_2024": 0.0, + "acc,exam_id__UNICAMP_2021_1": 0.0, + "acc,exam_id__UNICAMP_2024": 0.0, + "acc,exam_id__UNICAMP_2020": 0.0, + "acc,exam_id__USP_2019": 0.0, + "acc,exam_id__USP_2018": 0.0, + "acc,exam_id__UNICAMP_2019": 0.0, + "acc,exam_id__USP_2021": 0.0, + "acc,exam_id__UNICAMP_2018": 0.0, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.0, + "acc,exam_id__2013": 0.0, + "acc,exam_id__2022": 0.0, + "acc,exam_id__2023": 0.0, + "acc,exam_id__2016_2": 0.0, + "acc,exam_id__2010": 0.0, + "acc,exam_id__2014": 0.0, + "acc,exam_id__2009": 0.0, + "acc,exam_id__2015": 0.0, + "acc,exam_id__2016": 0.0, + "acc,exam_id__2012": 0.0, + "acc,exam_id__2011": 0.0, + "acc,exam_id__2017": 0.0 + }, + "faquad_nli": { + "f1_macro,all": 0.0, + "acc,all": 0.0, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.0014265335235378032, + "acc,all": 0.0007142857142857143 + }, + "oab_exams": { + "acc,all": 0.0, + "acc,exam_id__2014-15": 0.0, + "acc,exam_id__2017-24": 0.0, + "acc,exam_id__2012-06a": 0.0, + "acc,exam_id__2011-03": 0.0, + "acc,exam_id__2016-20a": 0.0, + "acc,exam_id__2012-06": 0.0, + "acc,exam_id__2010-02": 0.0, + "acc,exam_id__2015-16": 0.0, + "acc,exam_id__2010-01": 0.0, + "acc,exam_id__2015-18": 0.0, + "acc,exam_id__2016-19": 0.0, + "acc,exam_id__2012-08": 0.0, + "acc,exam_id__2017-22": 0.0, + "acc,exam_id__2018-25": 0.0, + "acc,exam_id__2012-09": 0.0, + "acc,exam_id__2013-10": 0.0, + "acc,exam_id__2011-04": 0.0, + "acc,exam_id__2015-17": 0.0, + "acc,exam_id__2011-05": 0.0, + "acc,exam_id__2016-21": 0.0, + "acc,exam_id__2016-20": 0.0, + "acc,exam_id__2013-11": 0.0, + "acc,exam_id__2012-07": 0.0, + "acc,exam_id__2014-13": 0.0, + "acc,exam_id__2017-23": 0.0, + "acc,exam_id__2013-12": 0.0, + "acc,exam_id__2014-14": 0.0, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.0033388981636060097, + "acc,all": 0.0023501762632197414 + }, + "tweetsentbr": { + "f1_macro,all": 
0.12599118942731277, + "acc,all": 0.21343283582089553, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 14150, - "non_truncated": 0, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 153165, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "1e0d4f3028acbffb47fe933cea64619c5ec1a002", - "model_dtype": "torch.float16", - "model_memory_footprint": 239365120, - "model_num_parameters": 116534784, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 64, - "max_length": 512, - "max_ctx_length": 480, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 2448, - "non_truncated": 0, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 27676, - "mean_seq_length": 1322.7389705882354, - "min_seq_length": 1298, - "max_seq_length": 1397, - "max_ctx_length": 480, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 3.6944444444444446 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 2448, - "non_truncated": 0, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 29380, - "mean_seq_length": 1493.7389705882354, - "min_seq_length": 1469, - "max_seq_length": 1568, - "max_ctx_length": 480, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 2.9983660130718954 }, - "bluex": { - "sample_size": 719, - "truncated": 719, - "non_truncated": 0, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2102, - "mean_seq_length": 1496.6578581363003, - "min_seq_length": 1159, - "max_seq_length": 2206, - "max_ctx_length": 480, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 0.07649513212795549 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1429, - "non_truncated": 0, - "padded": 0, - "non_padded": 1429, - 
"fewshots_truncated": 4287, - "mean_seq_length": 1630.7963610916725, - "min_seq_length": 1354, - "max_seq_length": 3132, - "max_ctx_length": 480, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 0.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 650, - "non_truncated": 0, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 8125, - "mean_seq_length": 1630.416923076923, - "min_seq_length": 1572, - "max_seq_length": 1757, - "max_ctx_length": 480, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 2.5 + "model_meta": { + "truncated": 14150, + "non_truncated": 0, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 153165, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "1e0d4f3028acbffb47fe933cea64619c5ec1a002", + "model_dtype": "torch.float16", + "model_memory_footprint": 239365120, + "model_num_parameters": 116534784, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 64, + "max_length": 512, + "max_ctx_length": 480, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 1400, - "non_truncated": 0, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 23889, - "mean_seq_length": 1195.195, - "min_seq_length": 1173, - "max_seq_length": 1455, - "max_ctx_length": 480, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 7.936428571428571 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 2195, - "non_truncated": 0, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 6497, - "mean_seq_length": 1402.3845102505695, - "min_seq_length": 1120, - "max_seq_length": 1940, - "max_ctx_length": 480, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 0.04009111617312073 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 2448, + "non_truncated": 0, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 27676, + "mean_seq_length": 1322.7389705882354, + "min_seq_length": 1298, + "max_seq_length": 1397, + "max_ctx_length": 480, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 3.6944444444444446 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 2448, + "non_truncated": 0, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 29380, + "mean_seq_length": 1493.7389705882354, + "min_seq_length": 1469, + "max_seq_length": 1568, + "max_ctx_length": 480, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 2.9983660130718954 + }, + "bluex": { + "sample_size": 719, + "truncated": 719, + "non_truncated": 0, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2102, + "mean_seq_length": 1496.6578581363003, + "min_seq_length": 1159, + "max_seq_length": 2206, + "max_ctx_length": 480, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 0.07649513212795549 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1429, + "non_truncated": 0, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 4287, + "mean_seq_length": 
1630.7963610916725, + "min_seq_length": 1354, + "max_seq_length": 3132, + "max_ctx_length": 480, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 0.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 650, + "non_truncated": 0, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 8125, + "mean_seq_length": 1630.416923076923, + "min_seq_length": 1572, + "max_seq_length": 1757, + "max_ctx_length": 480, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 2.5 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 1400, + "non_truncated": 0, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 23889, + "mean_seq_length": 1195.195, + "min_seq_length": 1173, + "max_seq_length": 1455, + "max_ctx_length": 480, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 7.936428571428571 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 2195, + "non_truncated": 0, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 6497, + "mean_seq_length": 1402.3845102505695, + "min_seq_length": 1120, + "max_seq_length": 1940, + "max_ctx_length": 480, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 0.04009111617312073 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 851, + "non_truncated": 0, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 17025, + "mean_seq_length": 1637.5487661574618, + "min_seq_length": 1602, + "max_seq_length": 1668, + "max_ctx_length": 480, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 4.994124559341951 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 2010, + "non_truncated": 0, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 34184, + "mean_seq_length": 1439.581592039801, + "min_seq_length": 1420, + "max_seq_length": 1472, + "max_ctx_length": 480, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 7.993034825870647 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 851, - "non_truncated": 0, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 17025, - "mean_seq_length": 1637.5487661574618, - "min_seq_length": 1602, - "max_seq_length": 1668, - "max_ctx_length": 480, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 4.994124559341951 + "config": { + "model": "huggingface", + "model_args": "pretrained=openai-community/openai-gpt,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 2010, - "non_truncated": 0, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 34184, - "mean_seq_length": 1439.581592039801, - "min_seq_length": 1420, - "max_seq_length": 1472, - "max_ctx_length": 480, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 7.993034825870647 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=openai-community/openai-gpt,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - 
"device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "0e4d6ae" + "git_hash": "0e4d6ae" } \ No newline at end of file diff --git a/openai-community/openai-gpt/results_2024-04-16T11-44-49.742462.json b/openai-community/openai-gpt/results_2024-04-16T11-44-49.742462.json index 143192123e72ce94314b88f401e768ec2118b441..69624bdcecfdc1cdd7642f8ffb3f69d1c4212177 100644 --- a/openai-community/openai-gpt/results_2024-04-16T11-44-49.742462.json +++ b/openai-community/openai-gpt/results_2024-04-16T11-44-49.742462.json @@ -34,29 +34,29 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.015841039696084636, - "all_grouped_npm": -0.547279116155147, + "all_grouped_average": 0.01963793424230154, + "all_grouped_npm": -0.5414868374325388, "all_grouped": { "enem_challenge": 0.0, "bluex": 0.0, "oab_exams": 0.0, - "assin2_rte": 0.002171552660152009, + "assin2_rte": 0.003257328990228013, "assin2_sts": 0.042727458076029255, "faquad_nli": 0.0, - "hatebr_offensive": 0.0009510223490252021, - "portuguese_hate_speech": 0.0022259321090706734, - "tweetsentbr": 0.09449339207048459 + "hatebr_offensive": 0.0014265335235378032, + "portuguese_hate_speech": 0.0033388981636060097, + "tweetsentbr": 0.12599118942731277 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.0, "harness|bluex|bluex|None|3": 0.0, "harness|oab_exams|oab_exams|None|3": 0.0, - "harness|assin2_rte|assin2_rte|None|15": 0.002171552660152009, + "harness|assin2_rte|assin2_rte|None|15": 0.003257328990228013, "harness|assin2_sts|assin2_sts|None|15": 0.042727458076029255, "harness|faquad_nli|faquad_nli|None|15": 0.0, - "harness|hatebr_offensive|hatebr_offensive|None|25": 0.0009510223490252021, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.0022259321090706734, - "harness|tweetsentbr|tweetsentbr|None|25": 0.09449339207048459 + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.0014265335235378032, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.0033388981636060097, + "harness|tweetsentbr|tweetsentbr|None|25": 0.12599118942731277 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.0, @@ -125,9 +125,9 @@ "main_score": 0.0 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.002171552660152009, + "f1_macro,all": 0.003257328990228013, "acc,all": 0.0016339869281045752, - "main_score": 0.002171552660152009 + "main_score": 0.003257328990228013 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.042727458076029255, @@ -140,19 +140,19 @@ "main_score": 0.0 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.0009510223490252021, + "f1_macro,all": 0.0014265335235378032, "acc,all": 0.0007142857142857143, - "main_score": 0.0009510223490252021 + "main_score": 0.0014265335235378032 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.0022259321090706734, + "f1_macro,all": 0.0033388981636060097, "acc,all": 0.0023501762632197414, - "main_score": 0.0022259321090706734 + "main_score": 0.0033388981636060097 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.09449339207048459, + "f1_macro,all": 0.12599118942731277, "acc,all": 0.21343283582089553, - "main_score": 0.09449339207048459 + "main_score": 0.12599118942731277 } }, "config_tasks": { diff --git a/openchat/openchat-3.5-0106/raw_2024-02-21T21-51-39.369217/results.json 
b/openchat/openchat-3.5-0106/raw_2024-02-21T21-51-39.369217/results.json index d1fd6814f32c9626bddad13d8623d5af8e9d53b2..c9307a77521e9ab70355a7c2726d4d5d75a21a64 100644 --- a/openchat/openchat-3.5-0106/raw_2024-02-21T21-51-39.369217/results.json +++ b/openchat/openchat-3.5-0106/raw_2024-02-21T21-51-39.369217/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9272805921368836, - "acc,all": 0.9272875816993464, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.8316751955429375, - "mse,all": 0.36165441176470575, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5271210013908206, - "acc,exam_id__USP_2021": 0.5384615384615384, - "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, - "acc,exam_id__UNICAMP_2023": 0.4883720930232558, - "acc,exam_id__UNICAMP_2021_1": 0.43478260869565216, - "acc,exam_id__USP_2024": 0.6829268292682927, - "acc,exam_id__UNICAMP_2018": 0.5, - "acc,exam_id__USP_2022": 0.6326530612244898, - "acc,exam_id__UNICAMP_2020": 0.5272727272727272, - "acc,exam_id__USP_2018": 0.4444444444444444, - "acc,exam_id__USP_2020": 0.5357142857142857, - "acc,exam_id__UNICAMP_2022": 0.5641025641025641, - "acc,exam_id__USP_2019": 0.45, - "acc,exam_id__USP_2023": 0.5681818181818182, - "acc,exam_id__UNICAMP_2019": 0.54, - "acc,exam_id__UNICAMP_2024": 0.5111111111111111, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6438068579426172, - "acc,exam_id__2016_2": 0.6504065040650406, - "acc,exam_id__2009": 0.6, - "acc,exam_id__2011": 0.717948717948718, - "acc,exam_id__2012": 0.646551724137931, - "acc,exam_id__2013": 0.6574074074074074, - "acc,exam_id__2016": 0.5950413223140496, - "acc,exam_id__2022": 0.6616541353383458, - "acc,exam_id__2023": 0.6592592592592592, - "acc,exam_id__2010": 0.6410256410256411, - "acc,exam_id__2014": 0.6055045871559633, - "acc,exam_id__2015": 0.6050420168067226, - "acc,exam_id__2017": 0.6810344827586207 - }, - "faquad_nli": { - "f1_macro,all": 0.7920758364261121, - "acc,all": 0.8569230769230769, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7930509518656714, - "acc,all": 0.7992857142857143 - }, - "oab_exams": { - "acc,all": 0.44874715261959, - "acc,exam_id__2010-02": 0.45, - "acc,exam_id__2016-19": 0.5384615384615384, - "acc,exam_id__2015-17": 0.5641025641025641, - "acc,exam_id__2016-21": 0.4, - "acc,exam_id__2017-24": 0.3875, - "acc,exam_id__2012-09": 0.4025974025974026, - "acc,exam_id__2011-04": 0.35, - "acc,exam_id__2017-23": 0.4125, - "acc,exam_id__2011-03": 0.37373737373737376, - "acc,exam_id__2012-07": 0.4, - "acc,exam_id__2012-06": 0.4375, - "acc,exam_id__2014-13": 0.3875, - "acc,exam_id__2016-20a": 0.375, - "acc,exam_id__2011-05": 0.4375, - "acc,exam_id__2015-18": 0.4875, - "acc,exam_id__2014-15": 0.5256410256410257, - "acc,exam_id__2018-25": 0.4625, - "acc,exam_id__2017-22": 0.525, - "acc,exam_id__2013-11": 0.425, - "acc,exam_id__2014-14": 0.575, - "acc,exam_id__2013-10": 0.4625, - "acc,exam_id__2010-01": 0.4470588235294118, - "acc,exam_id__2013-12": 0.5, - "acc,exam_id__2015-16": 0.425, - "acc,exam_id__2012-06a": 0.5, - "acc,exam_id__2016-20": 0.4625, - "acc,exam_id__2012-08": 0.425, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.718759501368197, - "acc,all": 0.7649823736780259 - }, - "tweetsentbr": { - "f1_macro,all": 0.5000062386719876, - "acc,all": 0.7029850746268657, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - 
"task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9272805921368836, + "acc,all": 0.9272875816993464, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.8316751955429375, + "mse,all": 0.36165441176470575, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5271210013908206, + "acc,exam_id__USP_2021": 0.5384615384615384, + "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, + "acc,exam_id__UNICAMP_2023": 0.4883720930232558, + "acc,exam_id__UNICAMP_2021_1": 0.43478260869565216, + "acc,exam_id__USP_2024": 0.6829268292682927, + "acc,exam_id__UNICAMP_2018": 0.5, + "acc,exam_id__USP_2022": 0.6326530612244898, + "acc,exam_id__UNICAMP_2020": 0.5272727272727272, + "acc,exam_id__USP_2018": 0.4444444444444444, + "acc,exam_id__USP_2020": 0.5357142857142857, + "acc,exam_id__UNICAMP_2022": 0.5641025641025641, + "acc,exam_id__USP_2019": 0.45, + "acc,exam_id__USP_2023": 0.5681818181818182, + "acc,exam_id__UNICAMP_2019": 0.54, + "acc,exam_id__UNICAMP_2024": 0.5111111111111111, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6438068579426172, + "acc,exam_id__2016_2": 0.6504065040650406, + "acc,exam_id__2009": 0.6, + "acc,exam_id__2011": 0.717948717948718, + "acc,exam_id__2012": 0.646551724137931, + "acc,exam_id__2013": 0.6574074074074074, + "acc,exam_id__2016": 0.5950413223140496, + "acc,exam_id__2022": 0.6616541353383458, + "acc,exam_id__2023": 0.6592592592592592, + "acc,exam_id__2010": 0.6410256410256411, + "acc,exam_id__2014": 0.6055045871559633, + "acc,exam_id__2015": 0.6050420168067226, + "acc,exam_id__2017": 0.6810344827586207 + }, + "faquad_nli": { + "f1_macro,all": 0.7920758364261121, + "acc,all": 0.8569230769230769, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7930509518656714, + "acc,all": 0.7992857142857143 + }, + "oab_exams": { + "acc,all": 0.44874715261959, + "acc,exam_id__2010-02": 0.45, + "acc,exam_id__2016-19": 0.5384615384615384, + "acc,exam_id__2015-17": 0.5641025641025641, + "acc,exam_id__2016-21": 0.4, + "acc,exam_id__2017-24": 0.3875, + "acc,exam_id__2012-09": 0.4025974025974026, + "acc,exam_id__2011-04": 0.35, + "acc,exam_id__2017-23": 0.4125, + "acc,exam_id__2011-03": 0.37373737373737376, + "acc,exam_id__2012-07": 0.4, + 
"acc,exam_id__2012-06": 0.4375, + "acc,exam_id__2014-13": 0.3875, + "acc,exam_id__2016-20a": 0.375, + "acc,exam_id__2011-05": 0.4375, + "acc,exam_id__2015-18": 0.4875, + "acc,exam_id__2014-15": 0.5256410256410257, + "acc,exam_id__2018-25": 0.4625, + "acc,exam_id__2017-22": 0.525, + "acc,exam_id__2013-11": 0.425, + "acc,exam_id__2014-14": 0.575, + "acc,exam_id__2013-10": 0.4625, + "acc,exam_id__2010-01": 0.4470588235294118, + "acc,exam_id__2013-12": 0.5, + "acc,exam_id__2015-16": 0.425, + "acc,exam_id__2012-06a": 0.5, + "acc,exam_id__2016-20": 0.4625, + "acc,exam_id__2012-08": 0.425, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.718759501368197, + "acc,all": 0.7649823736780259 + }, + "tweetsentbr": { + "f1_macro,all": 0.6666749848959834, + "acc,all": 0.7029850746268657, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": 
"find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - 
"regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. 
Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"9619fb7d2a8e25fa6b0633c0f57f7f4aa79b45c4", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 14617722880, - "model_num_parameters": 7241748480, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 4096, - "max_ctx_length": 4064, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1584.7455065359477, - "min_seq_length": 1561, - "max_seq_length": 1651, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1824.7455065359477, - "min_seq_length": 1801, - "max_seq_length": 1891, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1782.9262865090404, - "min_seq_length": 1406, - "max_seq_length": 2583, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1683.039188243527, - "min_seq_length": 1417, - "max_seq_length": 2681, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1825.9876923076922, - "min_seq_length": 1770, - "max_seq_length": 1946, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1676.3878571428572, - "min_seq_length": 1653, - "max_seq_length": 1927, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "9619fb7d2a8e25fa6b0633c0f57f7f4aa79b45c4", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 14617722880, + "model_num_parameters": 7241748480, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": 
null, + "model_device": "cuda:0", + "batch_size": 16, + "max_length": 4096, + "max_ctx_length": 4064, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1428.764464692483, - "min_seq_length": 1162, - "max_seq_length": 1931, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1584.7455065359477, + "min_seq_length": 1561, + "max_seq_length": 1651, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1824.7455065359477, + "min_seq_length": 1801, + "max_seq_length": 1891, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1782.9262865090404, + "min_seq_length": 1406, + "max_seq_length": 2583, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1683.039188243527, + "min_seq_length": 1417, + "max_seq_length": 2681, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1825.9876923076922, + "min_seq_length": 1770, + "max_seq_length": 1946, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1676.3878571428572, + "min_seq_length": 1653, + "max_seq_length": 1927, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1428.764464692483, + "min_seq_length": 1162, + "max_seq_length": 1931, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2177.3360752056406, + "min_seq_length": 2142, + "max_seq_length": 2216, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + 
"non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1923.2492537313433, + "min_seq_length": 1902, + "max_seq_length": 2018, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2177.3360752056406, - "min_seq_length": 2142, - "max_seq_length": 2216, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=openchat/openchat-3.5-0106,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1923.2492537313433, - "min_seq_length": 1902, - "max_seq_length": 2018, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=openchat/openchat-3.5-0106,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "804df15" + "git_hash": "804df15" } \ No newline at end of file diff --git a/openchat/openchat-3.5-0106/results_2024-02-21T21-51-39.369217.json b/openchat/openchat-3.5-0106/results_2024-02-21T21-51-39.369217.json index c44b0a4922507759d9274d631dcf19b14a0ca4d5..d632244387b93fb0096203547cfd8646344145f3 100644 --- a/openchat/openchat-3.5-0106/results_2024-02-21T21-51-39.369217.json +++ b/openchat/openchat-3.5-0106/results_2024-02-21T21-51-39.369217.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6869470364405353, - "all_grouped_npm": 0.5350961910969678, + "all_grouped_average": 0.7054657860209793, + "all_grouped_npm": 0.5626538541631048, "all_grouped": { "enem_challenge": 0.6438068579426172, "bluex": 0.5271210013908206, @@ -45,7 +45,7 @@ "faquad_nli": 0.7920758364261121, "hatebr_offensive": 0.7930509518656714, "portuguese_hate_speech": 0.718759501368197, - "tweetsentbr": 0.5000062386719876 + "tweetsentbr": 0.6666749848959834 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6438068579426172, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7920758364261121, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7930509518656714, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.718759501368197, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5000062386719876 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6666749848959834 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6438068579426172, @@ -150,9 +150,9 @@ "main_score": 0.718759501368197 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5000062386719876, + 
"f1_macro,all": 0.6666749848959834, "acc,all": 0.7029850746268657, - "main_score": 0.5000062386719876 + "main_score": 0.6666749848959834 } }, "config_tasks": { diff --git a/paulml/OGNO-7B/raw_2024-02-26T21-46-38.735296/results.json b/paulml/OGNO-7B/raw_2024-02-26T21-46-38.735296/results.json index 40550ecf6fa8db58dab6a47a27e179fae6bbfaaa..fce403ddf1bb87f25b708090442e5fef13e7dc5c 100644 --- a/paulml/OGNO-7B/raw_2024-02-26T21-46-38.735296/results.json +++ b/paulml/OGNO-7B/raw_2024-02-26T21-46-38.735296/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9195202213253991, - "acc,all": 0.9195261437908496, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.777306832779747, - "mse,all": 0.4300085821078431, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5438108484005564, - "acc,exam_id__USP_2024": 0.7560975609756098, - "acc,exam_id__USP_2020": 0.5, - "acc,exam_id__USP_2018": 0.46296296296296297, - "acc,exam_id__UNICAMP_2018": 0.5555555555555556, - "acc,exam_id__UNICAMP_2021_1": 0.5869565217391305, - "acc,exam_id__USP_2023": 0.5909090909090909, - "acc,exam_id__UNICAMP_2022": 0.5897435897435898, - "acc,exam_id__UNICAMP_2019": 0.54, - "acc,exam_id__UNICAMP_2024": 0.5333333333333333, - "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, - "acc,exam_id__UNICAMP_2020": 0.5454545454545454, - "acc,exam_id__USP_2022": 0.46938775510204084, - "acc,exam_id__USP_2021": 0.4807692307692308, - "acc,exam_id__UNICAMP_2023": 0.627906976744186, - "acc,exam_id__USP_2019": 0.45, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.635409377186844, - "acc,exam_id__2011": 0.6666666666666666, - "acc,exam_id__2016": 0.5702479338842975, - "acc,exam_id__2012": 0.6120689655172413, - "acc,exam_id__2015": 0.6134453781512605, - "acc,exam_id__2010": 0.7008547008547008, - "acc,exam_id__2023": 0.6444444444444445, - "acc,exam_id__2017": 0.6637931034482759, - "acc,exam_id__2013": 0.6759259259259259, - "acc,exam_id__2014": 0.6238532110091743, - "acc,exam_id__2009": 0.6434782608695652, - "acc,exam_id__2016_2": 0.6097560975609756, - "acc,exam_id__2022": 0.6090225563909775 - }, - "faquad_nli": { - "f1_macro,all": 0.7852659398744631, - "acc,all": 0.8507692307692307, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8146674347869539, - "acc,all": 0.8192857142857143 - }, - "oab_exams": { - "acc,all": 0.41776765375854213, - "acc,exam_id__2012-07": 0.3625, - "acc,exam_id__2012-09": 0.35064935064935066, - "acc,exam_id__2015-16": 0.3625, - "acc,exam_id__2011-03": 0.3434343434343434, - "acc,exam_id__2010-02": 0.42, - "acc,exam_id__2012-08": 0.425, - "acc,exam_id__2012-06a": 0.35, - "acc,exam_id__2014-15": 0.48717948717948717, - "acc,exam_id__2017-22": 0.5375, - "acc,exam_id__2016-19": 0.5, - "acc,exam_id__2010-01": 0.38823529411764707, - "acc,exam_id__2016-20a": 0.3375, - "acc,exam_id__2014-14": 0.525, - "acc,exam_id__2014-13": 0.3125, - "acc,exam_id__2016-20": 0.375, - "acc,exam_id__2015-17": 0.5128205128205128, - "acc,exam_id__2011-05": 0.4375, - "acc,exam_id__2013-11": 0.4625, - "acc,exam_id__2012-06": 0.5, - "acc,exam_id__2013-12": 0.4625, - "acc,exam_id__2011-04": 0.4, - "acc,exam_id__2013-10": 0.4125, - "acc,exam_id__2017-24": 0.3625, - "acc,exam_id__2016-21": 0.3875, - "acc,exam_id__2017-23": 0.4375, - "acc,exam_id__2015-18": 0.3875, - "acc,exam_id__2018-25": 0.4625, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 
0.7008767223734909, - "acc,all": 0.754406580493537 - }, - "tweetsentbr": { - "f1_macro,all": 0.49222624056760605, - "acc,all": 0.7059701492537314, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9195202213253991, + "acc,all": 0.9195261437908496, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.777306832779747, + "mse,all": 0.4300085821078431, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5438108484005564, + "acc,exam_id__USP_2024": 0.7560975609756098, + "acc,exam_id__USP_2020": 0.5, + "acc,exam_id__USP_2018": 0.46296296296296297, + "acc,exam_id__UNICAMP_2018": 0.5555555555555556, + "acc,exam_id__UNICAMP_2021_1": 0.5869565217391305, + "acc,exam_id__USP_2023": 0.5909090909090909, + "acc,exam_id__UNICAMP_2022": 0.5897435897435898, + "acc,exam_id__UNICAMP_2019": 0.54, + "acc,exam_id__UNICAMP_2024": 0.5333333333333333, + "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, + "acc,exam_id__UNICAMP_2020": 0.5454545454545454, + "acc,exam_id__USP_2022": 0.46938775510204084, + "acc,exam_id__USP_2021": 0.4807692307692308, + "acc,exam_id__UNICAMP_2023": 0.627906976744186, + "acc,exam_id__USP_2019": 0.45, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.635409377186844, + "acc,exam_id__2011": 0.6666666666666666, + "acc,exam_id__2016": 0.5702479338842975, + "acc,exam_id__2012": 0.6120689655172413, + "acc,exam_id__2015": 0.6134453781512605, + "acc,exam_id__2010": 0.7008547008547008, + "acc,exam_id__2023": 0.6444444444444445, + "acc,exam_id__2017": 0.6637931034482759, + "acc,exam_id__2013": 0.6759259259259259, + "acc,exam_id__2014": 0.6238532110091743, + "acc,exam_id__2009": 0.6434782608695652, + "acc,exam_id__2016_2": 0.6097560975609756, + "acc,exam_id__2022": 0.6090225563909775 + }, + "faquad_nli": { + "f1_macro,all": 0.7852659398744631, + "acc,all": 0.8507692307692307, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8146674347869539, + "acc,all": 0.8192857142857143 + }, + "oab_exams": { + "acc,all": 0.41776765375854213, + "acc,exam_id__2012-07": 0.3625, + "acc,exam_id__2012-09": 0.35064935064935066, + "acc,exam_id__2015-16": 0.3625, + "acc,exam_id__2011-03": 
0.3434343434343434, + "acc,exam_id__2010-02": 0.42, + "acc,exam_id__2012-08": 0.425, + "acc,exam_id__2012-06a": 0.35, + "acc,exam_id__2014-15": 0.48717948717948717, + "acc,exam_id__2017-22": 0.5375, + "acc,exam_id__2016-19": 0.5, + "acc,exam_id__2010-01": 0.38823529411764707, + "acc,exam_id__2016-20a": 0.3375, + "acc,exam_id__2014-14": 0.525, + "acc,exam_id__2014-13": 0.3125, + "acc,exam_id__2016-20": 0.375, + "acc,exam_id__2015-17": 0.5128205128205128, + "acc,exam_id__2011-05": 0.4375, + "acc,exam_id__2013-11": 0.4625, + "acc,exam_id__2012-06": 0.5, + "acc,exam_id__2013-12": 0.4625, + "acc,exam_id__2011-04": 0.4, + "acc,exam_id__2013-10": 0.4125, + "acc,exam_id__2017-24": 0.3625, + "acc,exam_id__2016-21": 0.3875, + "acc,exam_id__2017-23": 0.4375, + "acc,exam_id__2015-18": 0.3875, + "acc,exam_id__2018-25": 0.4625, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7008767223734909, + "acc,all": 0.754406580493537 + }, + "tweetsentbr": { + "f1_macro,all": 0.6563016540901414, + "acc,all": 0.7059701492537314, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "a5d97f2e6962dc2c539a5bbca6a1160f87ccce84", - 
"model_dtype": "torch.bfloat16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 4096, - "max_ctx_length": 4064, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1620.039188243527, - "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "a5d97f2e6962dc2c539a5bbca6a1160f87ccce84", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + 
"max_length": 4096, + "max_ctx_length": 4064, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + 
"fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=paulml/OGNO-7B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=paulml/OGNO-7B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "804df15" + "git_hash": "804df15" } \ No newline at end of file diff --git a/paulml/OGNO-7B/results_2024-02-26T21-46-38.735296.json b/paulml/OGNO-7B/results_2024-02-26T21-46-38.735296.json index 45c4500e0929b3973b0a633f78b30cff6c87e423..a883dfa57f708ed8e044cbffb805536c6112afb6 100644 --- a/paulml/OGNO-7B/results_2024-02-26T21-46-38.735296.json +++ b/paulml/OGNO-7B/results_2024-02-26T21-46-38.735296.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6763168078948447, - "all_grouped_npm": 0.52228027733777, + "all_grouped_average": 0.6945474093973486, + "all_grouped_npm": 0.5494091486212581, "all_grouped": { "enem_challenge": 0.635409377186844, "bluex": 0.5438108484005564, @@ -45,7 +45,7 @@ "faquad_nli": 0.7852659398744631, "hatebr_offensive": 0.8146674347869539, "portuguese_hate_speech": 0.7008767223734909, - "tweetsentbr": 0.49222624056760605 + "tweetsentbr": 0.6563016540901414 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.635409377186844, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7852659398744631, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8146674347869539, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7008767223734909, - "harness|tweetsentbr|tweetsentbr|None|25": 0.49222624056760605 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6563016540901414 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.635409377186844, @@ -150,9 +150,9 @@ "main_score": 0.7008767223734909 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.49222624056760605, + "f1_macro,all": 0.6563016540901414, "acc,all": 0.7059701492537314, - "main_score": 0.49222624056760605 + "main_score": 0.6563016540901414 } }, 
"config_tasks": { diff --git a/princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2024-08-11T19-34-03.542478/results.json b/princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2024-08-11T19-34-03.542478/results.json index d487fc55c48efbb99693a4e1d3f1364ba48e9da7..d4c6b2c982200f2410398e3f2742da9085ee1b36 100644 --- a/princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2024-08-11T19-34-03.542478/results.json +++ b/princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2024-08-11T19-34-03.542478/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.597638648977482, - "acc,all": 0.8905228758169934, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.6991081985044032, - "mse,all": 1.1034438985734072, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5591098748261474, - "acc,exam_id__UNICAMP_2018": 0.5, - "acc,exam_id__USP_2022": 0.5918367346938775, - "acc,exam_id__UNICAMP_2019": 0.56, - "acc,exam_id__UNICAMP_2020": 0.6181818181818182, - "acc,exam_id__USP_2024": 0.6341463414634146, - "acc,exam_id__UNICAMP_2024": 0.5777777777777777, - "acc,exam_id__USP_2021": 0.5576923076923077, - "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, - "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, - "acc,exam_id__UNICAMP_2022": 0.5641025641025641, - "acc,exam_id__USP_2023": 0.6363636363636364, - "acc,exam_id__UNICAMP_2023": 0.6744186046511628, - "acc,exam_id__USP_2018": 0.4444444444444444, - "acc,exam_id__USP_2020": 0.5178571428571429, - "acc,exam_id__USP_2019": 0.55, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6578026592022393, - "acc,exam_id__2011": 0.7094017094017094, - "acc,exam_id__2010": 0.6752136752136753, - "acc,exam_id__2009": 0.6782608695652174, - "acc,exam_id__2015": 0.6890756302521008, - "acc,exam_id__2017": 0.6896551724137931, - "acc,exam_id__2014": 0.6330275229357798, - "acc,exam_id__2022": 0.631578947368421, - "acc,exam_id__2012": 0.6637931034482759, - "acc,exam_id__2023": 0.6592592592592592, - "acc,exam_id__2016_2": 0.6178861788617886, - "acc,exam_id__2016": 0.5867768595041323, - "acc,exam_id__2013": 0.6666666666666666 - }, - "faquad_nli": { - "f1_macro,all": 0.4433035088772794, - "acc,all": 0.6892307692307692, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.5755007995858538, - "acc,all": 0.86 - }, - "oab_exams": { - "acc,all": 0.4733485193621868, - "acc,exam_id__2017-24": 0.4625, - "acc,exam_id__2013-12": 0.525, - "acc,exam_id__2015-18": 0.4875, - "acc,exam_id__2018-25": 0.5, - "acc,exam_id__2016-19": 0.5769230769230769, - "acc,exam_id__2010-01": 0.3764705882352941, - "acc,exam_id__2016-20": 0.5125, - "acc,exam_id__2016-21": 0.4, - "acc,exam_id__2011-04": 0.45, - "acc,exam_id__2014-15": 0.6282051282051282, - "acc,exam_id__2010-02": 0.46, - "acc,exam_id__2013-10": 0.4, - "acc,exam_id__2012-07": 0.5, - "acc,exam_id__2015-17": 0.5769230769230769, - "acc,exam_id__2013-11": 0.5, - "acc,exam_id__2012-09": 0.4025974025974026, - "acc,exam_id__2015-16": 0.45, - "acc,exam_id__2012-08": 0.4625, - "acc,exam_id__2011-03": 0.3939393939393939, - "acc,exam_id__2016-20a": 0.3875, - "acc,exam_id__2014-13": 0.4125, - "acc,exam_id__2017-23": 0.45, - "acc,exam_id__2012-06": 0.475, - "acc,exam_id__2011-05": 0.4375, - "acc,exam_id__2014-14": 0.55, - "acc,exam_id__2012-06a": 0.5125, - "acc,exam_id__2017-22": 0.525, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.46074959929153286, - "acc,all": 
0.7050528789659224 - }, - "tweetsentbr": { - "f1_macro,all": 0.4887195992623018, - "acc,all": 0.7089552238805971, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.896457973466223, + "acc,all": 0.8905228758169934, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.6991081985044032, + "mse,all": 1.1034438985734072, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5591098748261474, + "acc,exam_id__UNICAMP_2018": 0.5, + "acc,exam_id__USP_2022": 0.5918367346938775, + "acc,exam_id__UNICAMP_2019": 0.56, + "acc,exam_id__UNICAMP_2020": 0.6181818181818182, + "acc,exam_id__USP_2024": 0.6341463414634146, + "acc,exam_id__UNICAMP_2024": 0.5777777777777777, + "acc,exam_id__USP_2021": 0.5576923076923077, + "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, + "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, + "acc,exam_id__UNICAMP_2022": 0.5641025641025641, + "acc,exam_id__USP_2023": 0.6363636363636364, + "acc,exam_id__UNICAMP_2023": 0.6744186046511628, + "acc,exam_id__USP_2018": 0.4444444444444444, + "acc,exam_id__USP_2020": 0.5178571428571429, + "acc,exam_id__USP_2019": 0.55, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6578026592022393, + "acc,exam_id__2011": 0.7094017094017094, + "acc,exam_id__2010": 0.6752136752136753, + "acc,exam_id__2009": 0.6782608695652174, + "acc,exam_id__2015": 0.6890756302521008, + "acc,exam_id__2017": 0.6896551724137931, + "acc,exam_id__2014": 0.6330275229357798, + "acc,exam_id__2022": 0.631578947368421, + "acc,exam_id__2012": 0.6637931034482759, + "acc,exam_id__2023": 0.6592592592592592, + "acc,exam_id__2016_2": 0.6178861788617886, + "acc,exam_id__2016": 0.5867768595041323, + "acc,exam_id__2013": 0.6666666666666666 + }, + "faquad_nli": { + "f1_macro,all": 0.664955263315919, + "acc,all": 0.6892307692307692, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8632511993787806, + "acc,all": 0.86 + }, + "oab_exams": { + "acc,all": 0.4733485193621868, + "acc,exam_id__2017-24": 0.4625, + "acc,exam_id__2013-12": 0.525, + "acc,exam_id__2015-18": 0.4875, + "acc,exam_id__2018-25": 0.5, + "acc,exam_id__2016-19": 0.5769230769230769, + 
"acc,exam_id__2010-01": 0.3764705882352941, + "acc,exam_id__2016-20": 0.5125, + "acc,exam_id__2016-21": 0.4, + "acc,exam_id__2011-04": 0.45, + "acc,exam_id__2014-15": 0.6282051282051282, + "acc,exam_id__2010-02": 0.46, + "acc,exam_id__2013-10": 0.4, + "acc,exam_id__2012-07": 0.5, + "acc,exam_id__2015-17": 0.5769230769230769, + "acc,exam_id__2013-11": 0.5, + "acc,exam_id__2012-09": 0.4025974025974026, + "acc,exam_id__2015-16": 0.45, + "acc,exam_id__2012-08": 0.4625, + "acc,exam_id__2011-03": 0.3939393939393939, + "acc,exam_id__2016-20a": 0.3875, + "acc,exam_id__2014-13": 0.4125, + "acc,exam_id__2017-23": 0.45, + "acc,exam_id__2012-06": 0.475, + "acc,exam_id__2011-05": 0.4375, + "acc,exam_id__2014-14": 0.55, + "acc,exam_id__2012-06a": 0.5125, + "acc,exam_id__2017-22": 0.525, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6911243989372992, + "acc,all": 0.7050528789659224 + }, + "tweetsentbr": { + "f1_macro,all": 0.6516261323497358, + "acc,all": 0.7089552238805971, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "2b18bdce3e3e8f588f48ff62350db9620ceefde3", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 16060530944, - "model_num_parameters": 8030261248, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1318.5322712418301, - "min_seq_length": 1299, - "max_seq_length": 1382, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1509.5322712418301, - "min_seq_length": 1490, - "max_seq_length": 1573, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1484.7719054242002, - "min_seq_length": 1165, - "max_seq_length": 2134, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1412.3547935619315, - "min_seq_length": 1187, - "max_seq_length": 2340, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1447.8215384615385, - "min_seq_length": 1402, - "max_seq_length": 1544, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "2b18bdce3e3e8f588f48ff62350db9620ceefde3", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 16060530944, + "model_num_parameters": 8030261248, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1279.3878571428572, - "min_seq_length": 1259, - "max_seq_length": 1498, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1220.3772209567198, - "min_seq_length": 988, - "max_seq_length": 1654, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1318.5322712418301, + "min_seq_length": 1299, + "max_seq_length": 1382, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1509.5322712418301, + "min_seq_length": 1490, + "max_seq_length": 1573, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1484.7719054242002, + "min_seq_length": 1165, + "max_seq_length": 2134, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1412.3547935619315, + "min_seq_length": 1187, + "max_seq_length": 2340, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1447.8215384615385, + "min_seq_length": 1402, + "max_seq_length": 1544, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1279.3878571428572, + "min_seq_length": 1259, + "max_seq_length": 1498, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1220.3772209567198, + "min_seq_length": 988, + "max_seq_length": 1654, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1676.4195064629848, + "min_seq_length": 1646, + "max_seq_length": 1708, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1537.1537313432837, + "min_seq_length": 1520, + "max_seq_length": 1585, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1676.4195064629848, - "min_seq_length": 1646, - "max_seq_length": 1708, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1537.1537313432837, - "min_seq_length": 1520, - "max_seq_length": 1585, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - 
"gen_kwargs": null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/results_2024-08-11T19-34-03.542478.json b/princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/results_2024-08-11T19-34-03.542478.json index 5cc5e8df4494dacb8a62b5d4188c123e82c8871e..4742c5b30d2f813899df4afe372eb603dfc76702 100644 --- a/princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/results_2024-08-11T19-34-03.542478.json +++ b/princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/results_2024-08-11T19-34-03.542478.json @@ -34,29 +34,29 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.5505868230988251, - "all_grouped_npm": 0.2808161481879235, + "all_grouped_average": 0.6840871354825482, + "all_grouped_npm": 0.5325034905256527, "all_grouped": { "enem_challenge": 0.6578026592022393, "bluex": 0.5591098748261474, "oab_exams": 0.4733485193621868, - "assin2_rte": 0.597638648977482, + "assin2_rte": 0.896457973466223, "assin2_sts": 0.6991081985044032, - "faquad_nli": 0.4433035088772794, - "hatebr_offensive": 0.5755007995858538, - "portuguese_hate_speech": 0.46074959929153286, - "tweetsentbr": 0.4887195992623018 + "faquad_nli": 0.664955263315919, + "hatebr_offensive": 0.8632511993787806, + "portuguese_hate_speech": 0.6911243989372992, + "tweetsentbr": 0.6516261323497358 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6578026592022393, "harness|bluex|bluex|None|3": 0.5591098748261474, "harness|oab_exams|oab_exams|None|3": 0.4733485193621868, - "harness|assin2_rte|assin2_rte|None|15": 0.597638648977482, + "harness|assin2_rte|assin2_rte|None|15": 0.896457973466223, "harness|assin2_sts|assin2_sts|None|15": 0.6991081985044032, - "harness|faquad_nli|faquad_nli|None|15": 0.4433035088772794, - "harness|hatebr_offensive|hatebr_offensive|None|25": 0.5755007995858538, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.46074959929153286, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4887195992623018 + "harness|faquad_nli|faquad_nli|None|15": 0.664955263315919, + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8632511993787806, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6911243989372992, + "harness|tweetsentbr|tweetsentbr|None|25": 0.6516261323497358 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6578026592022393, @@ -125,9 +125,9 @@ "main_score": 0.4733485193621868 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.597638648977482, + "f1_macro,all": 0.896457973466223, "acc,all": 0.8905228758169934, - "main_score": 0.597638648977482 + "main_score": 0.896457973466223 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.6991081985044032, @@ -135,24 +135,24 @@ "main_score": 0.6991081985044032 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.4433035088772794, + "f1_macro,all": 0.664955263315919, "acc,all": 0.6892307692307692, - "main_score": 0.4433035088772794 + "main_score": 0.664955263315919 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.5755007995858538, + "f1_macro,all": 0.8632511993787806, "acc,all": 0.86, - "main_score": 0.5755007995858538 + "main_score": 0.8632511993787806 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.46074959929153286, + "f1_macro,all": 0.6911243989372992, "acc,all": 0.7050528789659224, - "main_score": 0.46074959929153286 + "main_score": 0.6911243989372992 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 
0.4887195992623018, + "f1_macro,all": 0.6516261323497358, "acc,all": 0.7089552238805971, - "main_score": 0.4887195992623018 + "main_score": 0.6516261323497358 } }, "config_tasks": { diff --git a/princeton-nlp/Llama-3-Instruct-8B-SimPO/raw_2024-05-30T02-34-14.763780/results.json b/princeton-nlp/Llama-3-Instruct-8B-SimPO/raw_2024-05-30T02-34-14.763780/results.json index 70dd46c340fa0f20f3711f52a4edaa438900c56d..3ea97c1bfef9da8e5ec7315d8612c099bd8ff75d 100644 --- a/princeton-nlp/Llama-3-Instruct-8B-SimPO/raw_2024-05-30T02-34-14.763780/results.json +++ b/princeton-nlp/Llama-3-Instruct-8B-SimPO/raw_2024-05-30T02-34-14.763780/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9030211936375125, - "acc,all": 0.9031862745098039, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7249953009677698, - "mse,all": 0.9775247320261437, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5730180806675939, - "acc,exam_id__UNICAMP_2024": 0.6222222222222222, - "acc,exam_id__USP_2020": 0.5892857142857143, - "acc,exam_id__USP_2018": 0.5, - "acc,exam_id__UNICAMP_2018": 0.5, - "acc,exam_id__USP_2023": 0.6590909090909091, - "acc,exam_id__UNICAMP_2021_2": 0.49019607843137253, - "acc,exam_id__UNICAMP_2022": 0.6410256410256411, - "acc,exam_id__UNICAMP_2023": 0.627906976744186, - "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, - "acc,exam_id__UNICAMP_2020": 0.5636363636363636, - "acc,exam_id__USP_2022": 0.6530612244897959, - "acc,exam_id__USP_2021": 0.5576923076923077, - "acc,exam_id__UNICAMP_2019": 0.58, - "acc,exam_id__USP_2024": 0.6097560975609756, - "acc,exam_id__USP_2019": 0.5, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6585024492652204, - "acc,exam_id__2013": 0.6944444444444444, - "acc,exam_id__2014": 0.6238532110091743, - "acc,exam_id__2012": 0.6551724137931034, - "acc,exam_id__2016": 0.6198347107438017, - "acc,exam_id__2016_2": 0.6097560975609756, - "acc,exam_id__2017": 0.6293103448275862, - "acc,exam_id__2009": 0.6608695652173913, - "acc,exam_id__2011": 0.7094017094017094, - "acc,exam_id__2010": 0.6837606837606838, - "acc,exam_id__2023": 0.7185185185185186, - "acc,exam_id__2015": 0.680672268907563, - "acc,exam_id__2022": 0.6165413533834586 - }, - "faquad_nli": { - "f1_macro,all": 0.5990940435384879, - "acc,all": 0.6246153846153846, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.831110379359638, - "acc,all": 0.8335714285714285 - }, - "oab_exams": { - "acc,all": 0.4847380410022779, - "acc,exam_id__2015-16": 0.4625, - "acc,exam_id__2012-06a": 0.55, - "acc,exam_id__2015-17": 0.5641025641025641, - "acc,exam_id__2014-14": 0.55, - "acc,exam_id__2015-18": 0.4875, - "acc,exam_id__2011-05": 0.4375, - "acc,exam_id__2016-19": 0.5897435897435898, - "acc,exam_id__2011-03": 0.43434343434343436, - "acc,exam_id__2010-01": 0.36470588235294116, - "acc,exam_id__2012-06": 0.5375, - "acc,exam_id__2017-23": 0.45, - "acc,exam_id__2016-20a": 0.3875, - "acc,exam_id__2010-02": 0.46, - "acc,exam_id__2013-11": 0.5, - "acc,exam_id__2012-09": 0.42857142857142855, - "acc,exam_id__2014-15": 0.5769230769230769, - "acc,exam_id__2017-22": 0.5625, - "acc,exam_id__2016-21": 0.3875, - "acc,exam_id__2011-04": 0.425, - "acc,exam_id__2012-08": 0.4875, - "acc,exam_id__2014-13": 0.4125, - "acc,exam_id__2018-25": 0.5125, - "acc,exam_id__2013-10": 0.4875, - "acc,exam_id__2017-24": 0.45, - "acc,exam_id__2016-20": 0.5375, - "acc,exam_id__2013-12": 0.5125, - "acc,exam_id__2012-07": 0.5625, - 
"alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6961241160024121, - "acc,all": 0.7168037602820212 - }, - "tweetsentbr": { - "f1_macro,all": 0.5080264254093354, - "acc,all": 0.7258706467661692, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9030211936375125, + "acc,all": 0.9031862745098039, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7249953009677698, + "mse,all": 0.9775247320261437, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5730180806675939, + "acc,exam_id__UNICAMP_2024": 0.6222222222222222, + "acc,exam_id__USP_2020": 0.5892857142857143, + "acc,exam_id__USP_2018": 0.5, + "acc,exam_id__UNICAMP_2018": 0.5, + "acc,exam_id__USP_2023": 0.6590909090909091, + "acc,exam_id__UNICAMP_2021_2": 0.49019607843137253, + "acc,exam_id__UNICAMP_2022": 0.6410256410256411, + "acc,exam_id__UNICAMP_2023": 0.627906976744186, + "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, + "acc,exam_id__UNICAMP_2020": 0.5636363636363636, + "acc,exam_id__USP_2022": 0.6530612244897959, + "acc,exam_id__USP_2021": 0.5576923076923077, + "acc,exam_id__UNICAMP_2019": 0.58, + "acc,exam_id__USP_2024": 0.6097560975609756, + "acc,exam_id__USP_2019": 0.5, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6585024492652204, + "acc,exam_id__2013": 0.6944444444444444, + "acc,exam_id__2014": 0.6238532110091743, + "acc,exam_id__2012": 0.6551724137931034, + "acc,exam_id__2016": 0.6198347107438017, + "acc,exam_id__2016_2": 0.6097560975609756, + "acc,exam_id__2017": 0.6293103448275862, + "acc,exam_id__2009": 0.6608695652173913, + "acc,exam_id__2011": 0.7094017094017094, + "acc,exam_id__2010": 0.6837606837606838, + "acc,exam_id__2023": 0.7185185185185186, + "acc,exam_id__2015": 0.680672268907563, + "acc,exam_id__2022": 0.6165413533834586 + }, + "faquad_nli": { + "f1_macro,all": 0.5990940435384879, + "acc,all": 0.6246153846153846, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.831110379359638, + "acc,all": 0.8335714285714285 + }, + "oab_exams": { + "acc,all": 0.4847380410022779, + "acc,exam_id__2015-16": 0.4625, + 
"acc,exam_id__2012-06a": 0.55, + "acc,exam_id__2015-17": 0.5641025641025641, + "acc,exam_id__2014-14": 0.55, + "acc,exam_id__2015-18": 0.4875, + "acc,exam_id__2011-05": 0.4375, + "acc,exam_id__2016-19": 0.5897435897435898, + "acc,exam_id__2011-03": 0.43434343434343436, + "acc,exam_id__2010-01": 0.36470588235294116, + "acc,exam_id__2012-06": 0.5375, + "acc,exam_id__2017-23": 0.45, + "acc,exam_id__2016-20a": 0.3875, + "acc,exam_id__2010-02": 0.46, + "acc,exam_id__2013-11": 0.5, + "acc,exam_id__2012-09": 0.42857142857142855, + "acc,exam_id__2014-15": 0.5769230769230769, + "acc,exam_id__2017-22": 0.5625, + "acc,exam_id__2016-21": 0.3875, + "acc,exam_id__2011-04": 0.425, + "acc,exam_id__2012-08": 0.4875, + "acc,exam_id__2014-13": 0.4125, + "acc,exam_id__2018-25": 0.5125, + "acc,exam_id__2013-10": 0.4875, + "acc,exam_id__2017-24": 0.45, + "acc,exam_id__2016-20": 0.5375, + "acc,exam_id__2013-12": 0.5125, + "acc,exam_id__2012-07": 0.5625, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6961241160024121, + "acc,all": 0.7168037602820212 + }, + "tweetsentbr": { + "f1_macro,all": 0.6773685672124472, + "acc,all": 0.7258706467661692, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "7e47ec66136ebcb299dd45d25f27332a35a18f2c", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 16060530688, - "model_num_parameters": 8030261248, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1169.5322712418301, - "min_seq_length": 1150, - "max_seq_length": 1233, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1375.5322712418301, - "min_seq_length": 1356, - "max_seq_length": 1439, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1443.769123783032, - "min_seq_length": 1124, - "max_seq_length": 2093, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1371.3547935619315, 
- "min_seq_length": 1146, - "max_seq_length": 2299, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1298.8215384615385, - "min_seq_length": 1253, - "max_seq_length": 1395, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "7e47ec66136ebcb299dd45d25f27332a35a18f2c", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 16060530688, + "model_num_parameters": 8030261248, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1040.3878571428572, - "min_seq_length": 1020, - "max_seq_length": 1259, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1179.3772209567198, - "min_seq_length": 947, - "max_seq_length": 1613, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1169.5322712418301, + "min_seq_length": 1150, + "max_seq_length": 1233, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1375.5322712418301, + "min_seq_length": 1356, + "max_seq_length": 1439, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1443.769123783032, + "min_seq_length": 1124, + "max_seq_length": 2093, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1371.3547935619315, + "min_seq_length": 1146, + "max_seq_length": 2299, + "max_ctx_length": 2528, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1298.8215384615385, + "min_seq_length": 1253, + "max_seq_length": 1395, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1040.3878571428572, + "min_seq_length": 1020, + "max_seq_length": 1259, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1179.3772209567198, + "min_seq_length": 947, + "max_seq_length": 1613, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1437.4195064629848, + "min_seq_length": 1407, + "max_seq_length": 1469, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1298.1537313432837, + "min_seq_length": 1281, + "max_seq_length": 1346, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1437.4195064629848, - "min_seq_length": 1407, - "max_seq_length": 1469, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=princeton-nlp/Llama-3-Instruct-8B-SimPO,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1298.1537313432837, - "min_seq_length": 1281, - "max_seq_length": 1346, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=princeton-nlp/Llama-3-Instruct-8B-SimPO,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": 
"51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/princeton-nlp/Llama-3-Instruct-8B-SimPO/results_2024-05-30T02-34-14.763780.json b/princeton-nlp/Llama-3-Instruct-8B-SimPO/results_2024-05-30T02-34-14.763780.json index 0713531fa7b9fb486b6d1713c850613852aa7279..31115abcb19ac9ecaf2bde25174bdcd73ecf9f15 100644 --- a/princeton-nlp/Llama-3-Instruct-8B-SimPO/results_2024-05-30T02-34-14.763780.json +++ b/princeton-nlp/Llama-3-Instruct-8B-SimPO/results_2024-05-30T02-34-14.763780.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6642922255389165, - "all_grouped_npm": 0.4973453425674803, + "all_grouped_average": 0.6831080190725956, + "all_grouped_npm": 0.5253450353259315, "all_grouped": { "enem_challenge": 0.6585024492652204, "bluex": 0.5730180806675939, @@ -45,7 +45,7 @@ "faquad_nli": 0.5990940435384879, "hatebr_offensive": 0.831110379359638, "portuguese_hate_speech": 0.6961241160024121, - "tweetsentbr": 0.5080264254093354 + "tweetsentbr": 0.6773685672124472 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6585024492652204, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.5990940435384879, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.831110379359638, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6961241160024121, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5080264254093354 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6773685672124472 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6585024492652204, @@ -150,9 +150,9 @@ "main_score": 0.6961241160024121 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5080264254093354, + "f1_macro,all": 0.6773685672124472, "acc,all": 0.7258706467661692, - "main_score": 0.5080264254093354 + "main_score": 0.6773685672124472 } }, "config_tasks": { diff --git a/recogna-nlp/bode-7b-alpaca-pt-br/raw_2024-02-17T00-17-03.515313/results.json b/recogna-nlp/bode-7b-alpaca-pt-br/raw_2024-02-17T00-17-03.515313/results.json index f126e0f94730ce442b9cc576dd71223783bf7a53..c52c79f0e7e95c6e5a5cb6737003a1007466e211 100644 --- a/recogna-nlp/bode-7b-alpaca-pt-br/raw_2024-02-17T00-17-03.515313/results.json +++ b/recogna-nlp/bode-7b-alpaca-pt-br/raw_2024-02-17T00-17-03.515313/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.7983437034736313, - "acc,all": 0.8006535947712419, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.4346888805285222, - "mse,all": 1.8064338235294115, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.28929068150208626, - "acc,exam_id__USP_2023": 0.20454545454545456, - "acc,exam_id__UNICAMP_2020": 0.36363636363636365, - "acc,exam_id__USP_2024": 0.12195121951219512, - "acc,exam_id__USP_2018": 0.2222222222222222, - "acc,exam_id__UNICAMP_2019": 0.3, - "acc,exam_id__USP_2021": 0.25, - "acc,exam_id__UNICAMP_2021_2": 0.3137254901960784, - "acc,exam_id__UNICAMP_2023": 0.3953488372093023, - "acc,exam_id__USP_2022": 0.1836734693877551, - "acc,exam_id__UNICAMP_2018": 0.3888888888888889, - "acc,exam_id__USP_2020": 0.26785714285714285, - "acc,exam_id__UNICAMP_2021_1": 0.2826086956521739, - "acc,exam_id__USP_2019": 0.25, - "acc,exam_id__UNICAMP_2022": 0.41025641025641024, - "acc,exam_id__UNICAMP_2024": 0.37777777777777777, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.3435969209237229, - "acc,exam_id__2010": 0.27350427350427353, - "acc,exam_id__2011": 0.37606837606837606, - "acc,exam_id__2015": 0.3277310924369748, - 
"acc,exam_id__2022": 0.42105263157894735, - "acc,exam_id__2016": 0.3140495867768595, - "acc,exam_id__2009": 0.28695652173913044, - "acc,exam_id__2017": 0.3706896551724138, - "acc,exam_id__2013": 0.28703703703703703, - "acc,exam_id__2016_2": 0.43089430894308944, - "acc,exam_id__2012": 0.33620689655172414, - "acc,exam_id__2023": 0.362962962962963, - "acc,exam_id__2014": 0.3119266055045872 - }, - "faquad_nli": { - "f1_macro,all": 0.6744821306624175, - "acc,all": 0.7461538461538462, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.850631294632579, - "acc,all": 0.8507142857142858 - }, - "oab_exams": { - "acc,all": 0.3084282460136674, - "acc,exam_id__2015-18": 0.3375, - "acc,exam_id__2011-03": 0.30303030303030304, - "acc,exam_id__2017-22": 0.4, - "acc,exam_id__2012-06": 0.3125, - "acc,exam_id__2010-02": 0.38, - "acc,exam_id__2016-19": 0.3333333333333333, - "acc,exam_id__2012-09": 0.2727272727272727, - "acc,exam_id__2017-23": 0.35, - "acc,exam_id__2013-11": 0.3375, - "acc,exam_id__2010-01": 0.25882352941176473, - "acc,exam_id__2011-04": 0.3125, - "acc,exam_id__2014-14": 0.3, - "acc,exam_id__2018-25": 0.2625, - "acc,exam_id__2016-20a": 0.225, - "acc,exam_id__2014-15": 0.32051282051282054, - "acc,exam_id__2011-05": 0.2375, - "acc,exam_id__2015-17": 0.32051282051282054, - "acc,exam_id__2012-06a": 0.3125, - "acc,exam_id__2013-12": 0.25, - "acc,exam_id__2012-07": 0.3375, - "acc,exam_id__2012-08": 0.3875, - "acc,exam_id__2017-24": 0.275, - "acc,exam_id__2014-13": 0.1875, - "acc,exam_id__2016-21": 0.35, - "acc,exam_id__2016-20": 0.3375, - "acc,exam_id__2015-16": 0.3375, - "acc,exam_id__2013-10": 0.275, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6572613124851931, - "acc,all": 0.6803760282021152 - }, - "tweetsentbr": { - "f1_macro,all": 0.4325035290687774, - "acc,all": 0.6388059701492538, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.7983437034736313, + "acc,all": 0.8006535947712419, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.4346888805285222, + "mse,all": 1.8064338235294115, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.28929068150208626, + "acc,exam_id__USP_2023": 0.20454545454545456, + "acc,exam_id__UNICAMP_2020": 0.36363636363636365, + "acc,exam_id__USP_2024": 0.12195121951219512, + "acc,exam_id__USP_2018": 0.2222222222222222, + "acc,exam_id__UNICAMP_2019": 0.3, + "acc,exam_id__USP_2021": 0.25, + "acc,exam_id__UNICAMP_2021_2": 0.3137254901960784, + "acc,exam_id__UNICAMP_2023": 0.3953488372093023, + "acc,exam_id__USP_2022": 0.1836734693877551, + "acc,exam_id__UNICAMP_2018": 0.3888888888888889, + "acc,exam_id__USP_2020": 0.26785714285714285, + "acc,exam_id__UNICAMP_2021_1": 0.2826086956521739, + "acc,exam_id__USP_2019": 0.25, + "acc,exam_id__UNICAMP_2022": 0.41025641025641024, + "acc,exam_id__UNICAMP_2024": 0.37777777777777777, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.3435969209237229, + "acc,exam_id__2010": 0.27350427350427353, + "acc,exam_id__2011": 0.37606837606837606, + "acc,exam_id__2015": 0.3277310924369748, + "acc,exam_id__2022": 0.42105263157894735, + "acc,exam_id__2016": 0.3140495867768595, + "acc,exam_id__2009": 0.28695652173913044, + "acc,exam_id__2017": 0.3706896551724138, + "acc,exam_id__2013": 0.28703703703703703, + "acc,exam_id__2016_2": 0.43089430894308944, + "acc,exam_id__2012": 0.33620689655172414, + "acc,exam_id__2023": 0.362962962962963, + "acc,exam_id__2014": 0.3119266055045872 + }, + "faquad_nli": { + "f1_macro,all": 0.6744821306624175, + "acc,all": 0.7461538461538462, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.850631294632579, + "acc,all": 0.8507142857142858 + }, + "oab_exams": { + "acc,all": 0.3084282460136674, + "acc,exam_id__2015-18": 0.3375, + "acc,exam_id__2011-03": 0.30303030303030304, + "acc,exam_id__2017-22": 0.4, + "acc,exam_id__2012-06": 0.3125, + "acc,exam_id__2010-02": 0.38, + "acc,exam_id__2016-19": 0.3333333333333333, + "acc,exam_id__2012-09": 0.2727272727272727, + "acc,exam_id__2017-23": 0.35, + "acc,exam_id__2013-11": 0.3375, + "acc,exam_id__2010-01": 0.25882352941176473, + "acc,exam_id__2011-04": 0.3125, + "acc,exam_id__2014-14": 0.3, + "acc,exam_id__2018-25": 0.2625, + "acc,exam_id__2016-20a": 0.225, + "acc,exam_id__2014-15": 0.32051282051282054, + "acc,exam_id__2011-05": 0.2375, + "acc,exam_id__2015-17": 0.32051282051282054, + "acc,exam_id__2012-06a": 0.3125, + "acc,exam_id__2013-12": 0.25, + "acc,exam_id__2012-07": 0.3375, + "acc,exam_id__2012-08": 
0.3875, + "acc,exam_id__2017-24": 0.275, + "acc,exam_id__2014-13": 0.1875, + "acc,exam_id__2016-21": 0.35, + "acc,exam_id__2016-20": 0.3375, + "acc,exam_id__2015-16": 0.3375, + "acc,exam_id__2013-10": 0.275, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6572613124851931, + "acc,all": 0.6803760282021152 + }, + "tweetsentbr": { + "f1_macro,all": 0.5766713720917033, + "acc,all": 0.6388059701492538, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"c1b0db933684edbfe29a06fa47eb19cc48025e93", - "model_dtype": "torch.float16", - "model_memory_footprint": 13552336896, - "model_num_parameters": 6742609920, - "model_is_loaded_in_4bit": false, - "model_is_loaded_in_8bit": false, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 4096, - "max_ctx_length": 4064, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1426.9889705882354, - "min_seq_length": 1404, - "max_seq_length": 1493, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1665.9889705882354, - "min_seq_length": 1643, - "max_seq_length": 1732, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1710.7426981919332, - "min_seq_length": 1344, - "max_seq_length": 2470, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1589.9881035689293, - "min_seq_length": 1337, - "max_seq_length": 2629, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1649.1184615384616, - "min_seq_length": 1597, - "max_seq_length": 1756, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1486.9178571428572, - "min_seq_length": 1463, - "max_seq_length": 1733, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "c1b0db933684edbfe29a06fa47eb19cc48025e93", + "model_dtype": "torch.float16", + "model_memory_footprint": 13552336896, + "model_num_parameters": 6742609920, + "model_is_loaded_in_4bit": false, + "model_is_loaded_in_8bit": false, + "model_is_quantized": 
null, + "model_device": "cuda:0", + "batch_size": 16, + "max_length": 4096, + "max_ctx_length": 4064, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1362.4145785876992, - "min_seq_length": 1107, - "max_seq_length": 1844, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1426.9889705882354, + "min_seq_length": 1404, + "max_seq_length": 1493, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1665.9889705882354, + "min_seq_length": 1643, + "max_seq_length": 1732, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1710.7426981919332, + "min_seq_length": 1344, + "max_seq_length": 2470, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1589.9881035689293, + "min_seq_length": 1337, + "max_seq_length": 2629, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1649.1184615384616, + "min_seq_length": 1597, + "max_seq_length": 1756, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1486.9178571428572, + "min_seq_length": 1463, + "max_seq_length": 1733, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1362.4145785876992, + "min_seq_length": 1107, + "max_seq_length": 1844, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1978.801410105758, + "min_seq_length": 1944, + "max_seq_length": 2022, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + 
"non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1745.6845771144278, + "min_seq_length": 1724, + "max_seq_length": 1863, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1978.801410105758, - "min_seq_length": 1944, - "max_seq_length": 2022, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=meta-llama/Llama-2-7b-chat-hf,peft=recogna-nlp/bode-7b-alpaca-pt-br,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1745.6845771144278, - "min_seq_length": 1724, - "max_seq_length": 1863, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=meta-llama/Llama-2-7b-chat-hf,peft=recogna-nlp/bode-7b-alpaca-pt-br,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "804df15" + "git_hash": "804df15" } \ No newline at end of file diff --git a/recogna-nlp/bode-7b-alpaca-pt-br/results_2024-02-17T00-17-03.515313.json b/recogna-nlp/bode-7b-alpaca-pt-br/results_2024-02-17T00-17-03.515313.json index 02ccb21e3ca3ced5bb30c194e64b4a3780b139ed..20689a343afa26991f538bfc0f2e40d08ba1aeab 100644 --- a/recogna-nlp/bode-7b-alpaca-pt-br/results_2024-02-17T00-17-03.515313.json +++ b/recogna-nlp/bode-7b-alpaca-pt-br/results_2024-02-17T00-17-03.515313.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.5321362999211775, - "all_grouped_npm": 0.3302533414364077, + "all_grouped_average": 0.548154949145947, + "all_grouped_npm": 0.35409061706850525, "all_grouped": { "enem_challenge": 0.3435969209237229, "bluex": 0.28929068150208626, @@ -45,7 +45,7 @@ "faquad_nli": 0.6744821306624175, "hatebr_offensive": 0.850631294632579, "portuguese_hate_speech": 0.6572613124851931, - "tweetsentbr": 0.4325035290687774 + "tweetsentbr": 0.5766713720917033 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.3435969209237229, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.6744821306624175, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.850631294632579, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6572613124851931, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4325035290687774 + "harness|tweetsentbr|tweetsentbr|None|25": 0.5766713720917033 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.3435969209237229, @@ -150,9 +150,9 @@ "main_score": 
0.6572613124851931 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4325035290687774, + "f1_macro,all": 0.5766713720917033, "acc,all": 0.6388059701492538, - "main_score": 0.4325035290687774 + "main_score": 0.5766713720917033 } }, "config_tasks": { diff --git a/recogna-nlp/mistralbode_7b_qlora_ultraalpaca/raw_2024-04-23T20-19-47.179024/results.json b/recogna-nlp/mistralbode_7b_qlora_ultraalpaca/raw_2024-04-23T20-19-47.179024/results.json index 9dab46e5acb29ca2f5589c4c06035c2a29659bf3..5f42f3132e381ef6142bc0afbd7079a894e34b46 100644 --- a/recogna-nlp/mistralbode_7b_qlora_ultraalpaca/raw_2024-04-23T20-19-47.179024/results.json +++ b/recogna-nlp/mistralbode_7b_qlora_ultraalpaca/raw_2024-04-23T20-19-47.179024/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.8891714080476152, - "acc,all": 0.8897058823529411, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7636816610855701, - "mse,all": 0.5976552287581699, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.4714881780250348, - "acc,exam_id__USP_2018": 0.3148148148148148, - "acc,exam_id__USP_2019": 0.375, - "acc,exam_id__USP_2020": 0.4642857142857143, - "acc,exam_id__UNICAMP_2020": 0.509090909090909, - "acc,exam_id__UNICAMP_2024": 0.4888888888888889, - "acc,exam_id__USP_2022": 0.42857142857142855, - "acc,exam_id__UNICAMP_2022": 0.5128205128205128, - "acc,exam_id__UNICAMP_2019": 0.5, - "acc,exam_id__USP_2021": 0.4423076923076923, - "acc,exam_id__UNICAMP_2018": 0.42592592592592593, - "acc,exam_id__UNICAMP_2021_2": 0.45098039215686275, - "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, - "acc,exam_id__UNICAMP_2023": 0.5116279069767442, - "acc,exam_id__USP_2023": 0.5454545454545454, - "acc,exam_id__USP_2024": 0.6341463414634146, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.5682295311406578, - "acc,exam_id__2013": 0.5555555555555556, - "acc,exam_id__2015": 0.5798319327731093, - "acc,exam_id__2016": 0.5289256198347108, - "acc,exam_id__2014": 0.5871559633027523, - "acc,exam_id__2012": 0.5517241379310345, - "acc,exam_id__2009": 0.5478260869565217, - "acc,exam_id__2016_2": 0.6178861788617886, - "acc,exam_id__2022": 0.5639097744360902, - "acc,exam_id__2017": 0.5086206896551724, - "acc,exam_id__2011": 0.6153846153846154, - "acc,exam_id__2023": 0.6222222222222222, - "acc,exam_id__2010": 0.5299145299145299 - }, - "faquad_nli": { - "f1_macro,all": 0.6716751326816636, - "acc,all": 0.7815384615384615, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8201601224933464, - "acc,all": 0.8221428571428572 - }, - "oab_exams": { - "acc,all": 0.3630979498861048, - "acc,exam_id__2016-21": 0.3625, - "acc,exam_id__2013-10": 0.35, - "acc,exam_id__2013-12": 0.3375, - "acc,exam_id__2012-08": 0.35, - "acc,exam_id__2010-02": 0.32, - "acc,exam_id__2017-22": 0.475, - "acc,exam_id__2012-07": 0.325, - "acc,exam_id__2011-03": 0.32323232323232326, - "acc,exam_id__2015-17": 0.5256410256410257, - "acc,exam_id__2012-06": 0.4125, - "acc,exam_id__2017-23": 0.375, - "acc,exam_id__2011-04": 0.375, - "acc,exam_id__2016-20": 0.4, - "acc,exam_id__2015-18": 0.3375, - "acc,exam_id__2016-20a": 0.3375, - "acc,exam_id__2012-06a": 0.2875, - "acc,exam_id__2015-16": 0.35, - "acc,exam_id__2013-11": 0.35, - "acc,exam_id__2018-25": 0.4125, - "acc,exam_id__2014-14": 0.425, - "acc,exam_id__2014-15": 0.358974358974359, - "acc,exam_id__2016-19": 0.41025641025641024, - "acc,exam_id__2012-09": 0.35064935064935066, - 
"acc,exam_id__2014-13": 0.3375, - "acc,exam_id__2011-05": 0.275, - "acc,exam_id__2017-24": 0.325, - "acc,exam_id__2010-01": 0.3411764705882353, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6923930249660347, - "acc,all": 0.72737955346651 - }, - "tweetsentbr": { - "f1_macro,all": 0.4814053205818839, - "acc,all": 0.6840796019900498, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.8891714080476152, + "acc,all": 0.8897058823529411, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7636816610855701, + "mse,all": 0.5976552287581699, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.4714881780250348, + "acc,exam_id__USP_2018": 0.3148148148148148, + "acc,exam_id__USP_2019": 0.375, + "acc,exam_id__USP_2020": 0.4642857142857143, + "acc,exam_id__UNICAMP_2020": 0.509090909090909, + "acc,exam_id__UNICAMP_2024": 0.4888888888888889, + "acc,exam_id__USP_2022": 0.42857142857142855, + "acc,exam_id__UNICAMP_2022": 0.5128205128205128, + "acc,exam_id__UNICAMP_2019": 0.5, + "acc,exam_id__USP_2021": 0.4423076923076923, + "acc,exam_id__UNICAMP_2018": 0.42592592592592593, + "acc,exam_id__UNICAMP_2021_2": 0.45098039215686275, + "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, + "acc,exam_id__UNICAMP_2023": 0.5116279069767442, + "acc,exam_id__USP_2023": 0.5454545454545454, + "acc,exam_id__USP_2024": 0.6341463414634146, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.5682295311406578, + "acc,exam_id__2013": 0.5555555555555556, + "acc,exam_id__2015": 0.5798319327731093, + "acc,exam_id__2016": 0.5289256198347108, + "acc,exam_id__2014": 0.5871559633027523, + "acc,exam_id__2012": 0.5517241379310345, + "acc,exam_id__2009": 0.5478260869565217, + "acc,exam_id__2016_2": 0.6178861788617886, + "acc,exam_id__2022": 0.5639097744360902, + "acc,exam_id__2017": 0.5086206896551724, + "acc,exam_id__2011": 0.6153846153846154, + "acc,exam_id__2023": 0.6222222222222222, + "acc,exam_id__2010": 0.5299145299145299 + }, + "faquad_nli": { + "f1_macro,all": 0.6716751326816636, + "acc,all": 0.7815384615384615, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": 
"hatebr_offensive_binary", + "f1_macro,all": 0.8201601224933464, + "acc,all": 0.8221428571428572 + }, + "oab_exams": { + "acc,all": 0.3630979498861048, + "acc,exam_id__2016-21": 0.3625, + "acc,exam_id__2013-10": 0.35, + "acc,exam_id__2013-12": 0.3375, + "acc,exam_id__2012-08": 0.35, + "acc,exam_id__2010-02": 0.32, + "acc,exam_id__2017-22": 0.475, + "acc,exam_id__2012-07": 0.325, + "acc,exam_id__2011-03": 0.32323232323232326, + "acc,exam_id__2015-17": 0.5256410256410257, + "acc,exam_id__2012-06": 0.4125, + "acc,exam_id__2017-23": 0.375, + "acc,exam_id__2011-04": 0.375, + "acc,exam_id__2016-20": 0.4, + "acc,exam_id__2015-18": 0.3375, + "acc,exam_id__2016-20a": 0.3375, + "acc,exam_id__2012-06a": 0.2875, + "acc,exam_id__2015-16": 0.35, + "acc,exam_id__2013-11": 0.35, + "acc,exam_id__2018-25": 0.4125, + "acc,exam_id__2014-14": 0.425, + "acc,exam_id__2014-15": 0.358974358974359, + "acc,exam_id__2016-19": 0.41025641025641024, + "acc,exam_id__2012-09": 0.35064935064935066, + "acc,exam_id__2014-13": 0.3375, + "acc,exam_id__2011-05": 0.275, + "acc,exam_id__2017-24": 0.325, + "acc,exam_id__2010-01": 0.3411764705882353, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6923930249660347, + "acc,all": 0.72737955346651 + }, + "tweetsentbr": { + "f1_macro,all": 0.6418737607758453, + "acc,all": 0.6840796019900498, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 3, - "non_truncated": 14147, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 3, - "has_chat_template": true, - "chat_type": "user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "41b61a33a2483885c981aa79e0df6b32407ed873", - "model_dtype": "torch.float16", - "model_memory_footprint": 15355887616, - "model_num_parameters": 7409504256, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1451.7455065359477, - "min_seq_length": 1428, - "max_seq_length": 1518, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1675.7455065359477, - "min_seq_length": 1652, - "max_seq_length": 1742, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 1, - "non_truncated": 718, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 1, - "mean_seq_length": 1744.9262865090404, - "min_seq_length": 1368, - "max_seq_length": 2545, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998609179415855 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1645.039188243527, - "min_seq_length": 1379, - "max_seq_length": 2643, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1691.9876923076922, - "min_seq_length": 1636, - "max_seq_length": 1812, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 3, + "non_truncated": 14147, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 3, + "has_chat_template": true, + "chat_type": "user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "41b61a33a2483885c981aa79e0df6b32407ed873", + "model_dtype": "torch.float16", + "model_memory_footprint": 15355887616, + "model_num_parameters": 7409504256, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1462.3878571428572, - "min_seq_length": 1439, - "max_seq_length": 1713, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1390.764464692483, - "min_seq_length": 1124, - "max_seq_length": 1893, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1451.7455065359477, + "min_seq_length": 1428, + "max_seq_length": 1518, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1675.7455065359477, + "min_seq_length": 1652, + "max_seq_length": 1742, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 1, + "non_truncated": 718, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 1, + "mean_seq_length": 1744.9262865090404, + "min_seq_length": 1368, + "max_seq_length": 2545, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998609179415855 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1645.039188243527, + "min_seq_length": 1379, + "max_seq_length": 2643, + 
"max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1691.9876923076922, + "min_seq_length": 1636, + "max_seq_length": 1812, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1462.3878571428572, + "min_seq_length": 1439, + "max_seq_length": 1713, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1390.764464692483, + "min_seq_length": 1124, + "max_seq_length": 1893, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1963.3360752056403, + "min_seq_length": 1928, + "max_seq_length": 2002, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1709.2492537313433, + "min_seq_length": 1688, + "max_seq_length": 1804, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1963.3360752056403, - "min_seq_length": 1928, - "max_seq_length": 2002, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=mistralai/Mistral-7B-Instruct-v0.2,peft=recogna-nlp/mistralbode_7b_qlora_ultraalpaca,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1709.2492537313433, - "min_seq_length": 1688, - "max_seq_length": 1804, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=mistralai/Mistral-7B-Instruct-v0.2,peft=recogna-nlp/mistralbode_7b_qlora_ultraalpaca,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - 
"limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/recogna-nlp/mistralbode_7b_qlora_ultraalpaca/results_2024-04-23T20-19-47.179024.json b/recogna-nlp/mistralbode_7b_qlora_ultraalpaca/results_2024-04-23T20-19-47.179024.json index 74bc68ec5807c59e09890f1530d26b66add2e11f..a52a1986d67599275093f73188e56017fb617a4d 100644 --- a/recogna-nlp/mistralbode_7b_qlora_ultraalpaca/results_2024-04-23T20-19-47.179024.json +++ b/recogna-nlp/mistralbode_7b_qlora_ultraalpaca/results_2024-04-23T20-19-47.179024.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6357002587675458, - "all_grouped_npm": 0.4606450081992029, + "all_grouped_average": 0.6535300854557637, + "all_grouped_npm": 0.48717748839000335, "all_grouped": { "enem_challenge": 0.5682295311406578, "bluex": 0.4714881780250348, @@ -45,7 +45,7 @@ "faquad_nli": 0.6716751326816636, "hatebr_offensive": 0.8201601224933464, "portuguese_hate_speech": 0.6923930249660347, - "tweetsentbr": 0.4814053205818839 + "tweetsentbr": 0.6418737607758453 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.5682295311406578, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.6716751326816636, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8201601224933464, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6923930249660347, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4814053205818839 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6418737607758453 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.5682295311406578, @@ -150,9 +150,9 @@ "main_score": 0.6923930249660347 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4814053205818839, + "f1_macro,all": 0.6418737607758453, "acc,all": 0.6840796019900498, - "main_score": 0.4814053205818839 + "main_score": 0.6418737607758453 } }, "config_tasks": { diff --git a/rhaymison/Mistral-8x7b-portuguese-luana/raw_2024-04-23T05-22-32.742075/results.json b/rhaymison/Mistral-8x7b-portuguese-luana/raw_2024-04-23T05-22-32.742075/results.json index a7f8d8291a1b7f299b5140624de9264ae28c5dfd..7a818c3cac9c326c5ed76e6c56297e05aac23dae 100644 --- a/rhaymison/Mistral-8x7b-portuguese-luana/raw_2024-04-23T05-22-32.742075/results.json +++ b/rhaymison/Mistral-8x7b-portuguese-luana/raw_2024-04-23T05-22-32.742075/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.6121383527313506, - "acc,all": 0.9150326797385621, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7994520046948086, - "mse,all": 0.5195479301470588, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5910987482614742, - "acc,exam_id__UNICAMP_2022": 0.6923076923076923, - "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, - "acc,exam_id__USP_2019": 0.5, - "acc,exam_id__UNICAMP_2020": 0.6363636363636364, - "acc,exam_id__USP_2018": 0.5185185185185185, - "acc,exam_id__UNICAMP_2021_2": 0.5882352941176471, - "acc,exam_id__UNICAMP_2023": 0.7441860465116279, - "acc,exam_id__USP_2020": 0.48214285714285715, - "acc,exam_id__UNICAMP_2024": 0.5555555555555556, - "acc,exam_id__USP_2021": 0.5961538461538461, - "acc,exam_id__USP_2023": 0.6590909090909091, - "acc,exam_id__UNICAMP_2018": 0.48148148148148145, - "acc,exam_id__USP_2022": 0.5918367346938775, - "acc,exam_id__USP_2024": 0.7560975609756098, - "acc,exam_id__UNICAMP_2019": 0.6, - "alias": "bluex" - }, - 
"enem_challenge": { - "alias": "enem", - "acc,all": 0.6962911126662001, - "acc,exam_id__2016": 0.6528925619834711, - "acc,exam_id__2022": 0.6165413533834586, - "acc,exam_id__2023": 0.7037037037037037, - "acc,exam_id__2009": 0.7043478260869566, - "acc,exam_id__2012": 0.6810344827586207, - "acc,exam_id__2016_2": 0.6504065040650406, - "acc,exam_id__2010": 0.7264957264957265, - "acc,exam_id__2013": 0.6759259259259259, - "acc,exam_id__2011": 0.8034188034188035, - "acc,exam_id__2014": 0.7522935779816514, - "acc,exam_id__2017": 0.6810344827586207, - "acc,exam_id__2015": 0.7226890756302521 - }, - "faquad_nli": { - "f1_macro,all": 0.7860385072937481, - "acc,all": 0.8246153846153846, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7241567974832157, - "acc,all": 0.7407142857142858 - }, - "oab_exams": { - "acc,all": 0.496127562642369, - "acc,exam_id__2015-16": 0.5125, - "acc,exam_id__2015-18": 0.5125, - "acc,exam_id__2016-20": 0.4875, - "acc,exam_id__2015-17": 0.6410256410256411, - "acc,exam_id__2013-10": 0.475, - "acc,exam_id__2011-03": 0.45454545454545453, - "acc,exam_id__2016-21": 0.4625, - "acc,exam_id__2012-06": 0.5, - "acc,exam_id__2011-04": 0.425, - "acc,exam_id__2013-11": 0.4875, - "acc,exam_id__2016-19": 0.47435897435897434, - "acc,exam_id__2017-24": 0.4625, - "acc,exam_id__2017-23": 0.4875, - "acc,exam_id__2017-22": 0.525, - "acc,exam_id__2012-06a": 0.5375, - "acc,exam_id__2010-02": 0.48, - "acc,exam_id__2013-12": 0.525, - "acc,exam_id__2014-15": 0.6666666666666666, - "acc,exam_id__2012-08": 0.425, - "acc,exam_id__2012-09": 0.5064935064935064, - "acc,exam_id__2016-20a": 0.475, - "acc,exam_id__2014-13": 0.525, - "acc,exam_id__2018-25": 0.4375, - "acc,exam_id__2014-14": 0.575, - "acc,exam_id__2010-01": 0.32941176470588235, - "acc,exam_id__2011-05": 0.5375, - "acc,exam_id__2012-07": 0.5, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7301160285642423, - "acc,all": 0.7614571092831962 - }, - "tweetsentbr": { - "f1_macro,all": 0.508994074903654, - "acc,all": 0.7293532338308458, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.918207529097026, + "acc,all": 0.9150326797385621, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7994520046948086, + "mse,all": 0.5195479301470588, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5910987482614742, + "acc,exam_id__UNICAMP_2022": 0.6923076923076923, + "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, + "acc,exam_id__USP_2019": 0.5, + "acc,exam_id__UNICAMP_2020": 0.6363636363636364, + "acc,exam_id__USP_2018": 0.5185185185185185, + "acc,exam_id__UNICAMP_2021_2": 0.5882352941176471, + "acc,exam_id__UNICAMP_2023": 0.7441860465116279, + "acc,exam_id__USP_2020": 0.48214285714285715, + "acc,exam_id__UNICAMP_2024": 0.5555555555555556, + "acc,exam_id__USP_2021": 0.5961538461538461, + "acc,exam_id__USP_2023": 0.6590909090909091, + "acc,exam_id__UNICAMP_2018": 0.48148148148148145, + "acc,exam_id__USP_2022": 0.5918367346938775, + "acc,exam_id__USP_2024": 0.7560975609756098, + "acc,exam_id__UNICAMP_2019": 0.6, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6962911126662001, + "acc,exam_id__2016": 0.6528925619834711, + "acc,exam_id__2022": 0.6165413533834586, + "acc,exam_id__2023": 0.7037037037037037, + "acc,exam_id__2009": 0.7043478260869566, + "acc,exam_id__2012": 0.6810344827586207, + "acc,exam_id__2016_2": 0.6504065040650406, + "acc,exam_id__2010": 0.7264957264957265, + "acc,exam_id__2013": 0.6759259259259259, + "acc,exam_id__2011": 0.8034188034188035, + "acc,exam_id__2014": 0.7522935779816514, + "acc,exam_id__2017": 0.6810344827586207, + "acc,exam_id__2015": 0.7226890756302521 + }, + "faquad_nli": { + "f1_macro,all": 0.7860385072937481, + "acc,all": 0.8246153846153846, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7241567974832157, + "acc,all": 0.7407142857142858 + }, + "oab_exams": { + "acc,all": 0.496127562642369, + "acc,exam_id__2015-16": 0.5125, + "acc,exam_id__2015-18": 0.5125, + "acc,exam_id__2016-20": 0.4875, + "acc,exam_id__2015-17": 0.6410256410256411, + "acc,exam_id__2013-10": 0.475, + "acc,exam_id__2011-03": 0.45454545454545453, + "acc,exam_id__2016-21": 0.4625, + "acc,exam_id__2012-06": 0.5, + "acc,exam_id__2011-04": 0.425, + "acc,exam_id__2013-11": 0.4875, + "acc,exam_id__2016-19": 0.47435897435897434, + "acc,exam_id__2017-24": 0.4625, + "acc,exam_id__2017-23": 0.4875, + "acc,exam_id__2017-22": 0.525, + "acc,exam_id__2012-06a": 0.5375, + "acc,exam_id__2010-02": 0.48, + "acc,exam_id__2013-12": 0.525, + "acc,exam_id__2014-15": 0.6666666666666666, + "acc,exam_id__2012-08": 0.425, + "acc,exam_id__2012-09": 0.5064935064935064, + "acc,exam_id__2016-20a": 0.475, + 
"acc,exam_id__2014-13": 0.525, + "acc,exam_id__2018-25": 0.4375, + "acc,exam_id__2014-14": 0.575, + "acc,exam_id__2010-01": 0.32941176470588235, + "acc,exam_id__2011-05": 0.5375, + "acc,exam_id__2012-07": 0.5, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7301160285642423, + "acc,all": 0.7614571092831962 + }, + "tweetsentbr": { + "f1_macro,all": 0.6786587665382053, + "acc,all": 0.7293532338308458, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 3, - "non_truncated": 14147, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 3, - "has_chat_template": true, - "chat_type": "user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "4cd7f88087395df95b19a6475e141cc07ca559cc", - "model_dtype": "torch.float16", - "model_memory_footprint": 24281890816, - "model_num_parameters": 46702792704, - "model_is_loaded_in_4bit": true, - "model_is_loaded_in_8bit": null, - "model_is_quantized": true, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1451.7455065359477, - "min_seq_length": 1428, - "max_seq_length": 1518, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1675.7455065359477, - "min_seq_length": 1652, - "max_seq_length": 1742, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 1, - "non_truncated": 718, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 1, - "mean_seq_length": 1744.9262865090404, - "min_seq_length": 1368, - "max_seq_length": 2545, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998609179415855 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1645.039188243527, - "min_seq_length": 1379, - "max_seq_length": 2643, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1691.9876923076922, - "min_seq_length": 1636, - "max_seq_length": 1812, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 3, + "non_truncated": 14147, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 3, + "has_chat_template": true, + "chat_type": "user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "4cd7f88087395df95b19a6475e141cc07ca559cc", + "model_dtype": "torch.float16", + "model_memory_footprint": 24281890816, + "model_num_parameters": 46702792704, + "model_is_loaded_in_4bit": true, + "model_is_loaded_in_8bit": null, + "model_is_quantized": true, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1462.3878571428572, - "min_seq_length": 1439, - "max_seq_length": 1713, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1390.764464692483, - "min_seq_length": 1124, - "max_seq_length": 1893, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1451.7455065359477, + "min_seq_length": 1428, + "max_seq_length": 1518, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1675.7455065359477, + "min_seq_length": 1652, + "max_seq_length": 1742, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 1, + "non_truncated": 718, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 1, + "mean_seq_length": 1744.9262865090404, + "min_seq_length": 1368, + "max_seq_length": 2545, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998609179415855 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1645.039188243527, + "min_seq_length": 1379, + "max_seq_length": 2643, 
+ "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1691.9876923076922, + "min_seq_length": 1636, + "max_seq_length": 1812, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1462.3878571428572, + "min_seq_length": 1439, + "max_seq_length": 1713, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1390.764464692483, + "min_seq_length": 1124, + "max_seq_length": 1893, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1963.3360752056403, + "min_seq_length": 1928, + "max_seq_length": 2002, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1709.2492537313433, + "min_seq_length": 1688, + "max_seq_length": 1804, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1963.3360752056403, - "min_seq_length": 1928, - "max_seq_length": 2002, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=rhaymison/Mistral-8x7b-portuguese-luana,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1709.2492537313433, - "min_seq_length": 1688, - "max_seq_length": 1804, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=rhaymison/Mistral-8x7b-portuguese-luana,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - 
"bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/rhaymison/Mistral-8x7b-portuguese-luana/results_2024-04-23T05-22-32.742075.json b/rhaymison/Mistral-8x7b-portuguese-luana/results_2024-04-23T05-22-32.742075.json index b475514efed77f9c92b800fb43e6721c72741bf2..44dfd44c51c5924fc3deb01e835c39e0ceeb15cd 100644 --- a/rhaymison/Mistral-8x7b-portuguese-luana/results_2024-04-23T05-22-32.742075.json +++ b/rhaymison/Mistral-8x7b-portuguese-luana/results_2024-04-23T05-22-32.742075.json @@ -34,29 +34,29 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6604903543601182, - "all_grouped_npm": 0.47233057257015926, + "all_grouped_average": 0.7133496730268098, + "all_grouped_npm": 0.5683989695063396, "all_grouped": { "enem_challenge": 0.6962911126662001, "bluex": 0.5910987482614742, "oab_exams": 0.496127562642369, - "assin2_rte": 0.6121383527313506, + "assin2_rte": 0.918207529097026, "assin2_sts": 0.7994520046948086, "faquad_nli": 0.7860385072937481, "hatebr_offensive": 0.7241567974832157, "portuguese_hate_speech": 0.7301160285642423, - "tweetsentbr": 0.508994074903654 + "tweetsentbr": 0.6786587665382053 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6962911126662001, "harness|bluex|bluex|None|3": 0.5910987482614742, "harness|oab_exams|oab_exams|None|3": 0.496127562642369, - "harness|assin2_rte|assin2_rte|None|15": 0.6121383527313506, + "harness|assin2_rte|assin2_rte|None|15": 0.918207529097026, "harness|assin2_sts|assin2_sts|None|15": 0.7994520046948086, "harness|faquad_nli|faquad_nli|None|15": 0.7860385072937481, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7241567974832157, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7301160285642423, - "harness|tweetsentbr|tweetsentbr|None|25": 0.508994074903654 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6786587665382053 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6962911126662001, @@ -125,9 +125,9 @@ "main_score": 0.496127562642369 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.6121383527313506, + "f1_macro,all": 0.918207529097026, "acc,all": 0.9150326797385621, - "main_score": 0.6121383527313506 + "main_score": 0.918207529097026 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.7994520046948086, @@ -150,9 +150,9 @@ "main_score": 0.7301160285642423 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.508994074903654, + "f1_macro,all": 0.6786587665382053, "acc,all": 0.7293532338308458, - "main_score": 0.508994074903654 + "main_score": 0.6786587665382053 } }, "config_tasks": { diff --git a/rhaymison/Mistral-portuguese-luana-7b-Mathematics/raw_2024-04-18T00-45-28.065413/results.json b/rhaymison/Mistral-portuguese-luana-7b-Mathematics/raw_2024-04-18T00-45-28.065413/results.json index 5fc976e080fe58116a446d394dc4fd8ce9f13b7d..82fa634719948fafd2f57fc1f14f3973e835b2d8 100644 --- a/rhaymison/Mistral-portuguese-luana-7b-Mathematics/raw_2024-04-18T00-45-28.065413/results.json +++ b/rhaymison/Mistral-portuguese-luana-7b-Mathematics/raw_2024-04-18T00-45-28.065413/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.8935916363890852, - "acc,all": 0.8941993464052288, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7478455553944315, - "mse,all": 0.6879618055555556, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.45897079276773295, - "acc,exam_id__UNICAMP_2023": 0.4883720930232558, - 
"acc,exam_id__UNICAMP_2020": 0.4909090909090909, - "acc,exam_id__USP_2024": 0.7073170731707317, - "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, - "acc,exam_id__UNICAMP_2024": 0.4888888888888889, - "acc,exam_id__UNICAMP_2018": 0.2962962962962963, - "acc,exam_id__USP_2019": 0.35, - "acc,exam_id__UNICAMP_2019": 0.46, - "acc,exam_id__UNICAMP_2021_2": 0.47058823529411764, - "acc,exam_id__USP_2023": 0.5, - "acc,exam_id__USP_2022": 0.40816326530612246, - "acc,exam_id__USP_2018": 0.37037037037037035, - "acc,exam_id__USP_2020": 0.44642857142857145, - "acc,exam_id__USP_2021": 0.40384615384615385, - "acc,exam_id__UNICAMP_2022": 0.6153846153846154, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.5668299510146956, - "acc,exam_id__2022": 0.5263157894736842, - "acc,exam_id__2016": 0.5454545454545454, - "acc,exam_id__2013": 0.5648148148148148, - "acc,exam_id__2016_2": 0.5447154471544715, - "acc,exam_id__2009": 0.5565217391304348, - "acc,exam_id__2023": 0.5925925925925926, - "acc,exam_id__2010": 0.5384615384615384, - "acc,exam_id__2012": 0.5344827586206896, - "acc,exam_id__2017": 0.5258620689655172, - "acc,exam_id__2014": 0.6238532110091743, - "acc,exam_id__2015": 0.5966386554621849, - "acc,exam_id__2011": 0.6581196581196581 - }, - "faquad_nli": { - "f1_macro,all": 0.7486666666666666, - "acc,all": 0.8215384615384616, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.763946859411585, - "acc,all": 0.7728571428571429 - }, - "oab_exams": { - "acc,all": 0.3790432801822323, - "acc,exam_id__2012-08": 0.3375, - "acc,exam_id__2016-20": 0.3875, - "acc,exam_id__2014-14": 0.375, - "acc,exam_id__2011-05": 0.375, - "acc,exam_id__2011-03": 0.3939393939393939, - "acc,exam_id__2012-06a": 0.325, - "acc,exam_id__2017-22": 0.5, - "acc,exam_id__2011-04": 0.3125, - "acc,exam_id__2010-01": 0.2823529411764706, - "acc,exam_id__2012-09": 0.33766233766233766, - "acc,exam_id__2014-13": 0.3375, - "acc,exam_id__2015-17": 0.44871794871794873, - "acc,exam_id__2015-16": 0.4, - "acc,exam_id__2017-24": 0.4125, - "acc,exam_id__2016-19": 0.4358974358974359, - "acc,exam_id__2010-02": 0.42, - "acc,exam_id__2014-15": 0.44871794871794873, - "acc,exam_id__2016-20a": 0.3375, - "acc,exam_id__2013-11": 0.375, - "acc,exam_id__2015-18": 0.3125, - "acc,exam_id__2016-21": 0.3375, - "acc,exam_id__2017-23": 0.4, - "acc,exam_id__2012-07": 0.3875, - "acc,exam_id__2018-25": 0.425, - "acc,exam_id__2013-12": 0.4125, - "acc,exam_id__2012-06": 0.4125, - "acc,exam_id__2013-10": 0.3, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6746063877642825, - "acc,all": 0.7203290246768508 - }, - "tweetsentbr": { - "f1_macro,all": 0.4902684536476732, - "acc,all": 0.6776119402985075, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.8935916363890852, + "acc,all": 0.8941993464052288, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7478455553944315, + "mse,all": 0.6879618055555556, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.45897079276773295, + "acc,exam_id__UNICAMP_2023": 0.4883720930232558, + "acc,exam_id__UNICAMP_2020": 0.4909090909090909, + "acc,exam_id__USP_2024": 0.7073170731707317, + "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, + "acc,exam_id__UNICAMP_2024": 0.4888888888888889, + "acc,exam_id__UNICAMP_2018": 0.2962962962962963, + "acc,exam_id__USP_2019": 0.35, + "acc,exam_id__UNICAMP_2019": 0.46, + "acc,exam_id__UNICAMP_2021_2": 0.47058823529411764, + "acc,exam_id__USP_2023": 0.5, + "acc,exam_id__USP_2022": 0.40816326530612246, + "acc,exam_id__USP_2018": 0.37037037037037035, + "acc,exam_id__USP_2020": 0.44642857142857145, + "acc,exam_id__USP_2021": 0.40384615384615385, + "acc,exam_id__UNICAMP_2022": 0.6153846153846154, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.5668299510146956, + "acc,exam_id__2022": 0.5263157894736842, + "acc,exam_id__2016": 0.5454545454545454, + "acc,exam_id__2013": 0.5648148148148148, + "acc,exam_id__2016_2": 0.5447154471544715, + "acc,exam_id__2009": 0.5565217391304348, + "acc,exam_id__2023": 0.5925925925925926, + "acc,exam_id__2010": 0.5384615384615384, + "acc,exam_id__2012": 0.5344827586206896, + "acc,exam_id__2017": 0.5258620689655172, + "acc,exam_id__2014": 0.6238532110091743, + "acc,exam_id__2015": 0.5966386554621849, + "acc,exam_id__2011": 0.6581196581196581 + }, + "faquad_nli": { + "f1_macro,all": 0.7486666666666666, + "acc,all": 0.8215384615384616, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.763946859411585, + "acc,all": 0.7728571428571429 + }, + "oab_exams": { + "acc,all": 0.3790432801822323, + "acc,exam_id__2012-08": 0.3375, + "acc,exam_id__2016-20": 0.3875, + "acc,exam_id__2014-14": 0.375, + "acc,exam_id__2011-05": 0.375, + "acc,exam_id__2011-03": 0.3939393939393939, + "acc,exam_id__2012-06a": 0.325, + "acc,exam_id__2017-22": 0.5, + "acc,exam_id__2011-04": 0.3125, + "acc,exam_id__2010-01": 0.2823529411764706, + "acc,exam_id__2012-09": 0.33766233766233766, + "acc,exam_id__2014-13": 0.3375, + "acc,exam_id__2015-17": 0.44871794871794873, + "acc,exam_id__2015-16": 0.4, + "acc,exam_id__2017-24": 0.4125, + "acc,exam_id__2016-19": 0.4358974358974359, + "acc,exam_id__2010-02": 0.42, + "acc,exam_id__2014-15": 0.44871794871794873, + "acc,exam_id__2016-20a": 0.3375, + "acc,exam_id__2013-11": 0.375, + "acc,exam_id__2015-18": 0.3125, + "acc,exam_id__2016-21": 0.3375, 
+ "acc,exam_id__2017-23": 0.4, + "acc,exam_id__2012-07": 0.3875, + "acc,exam_id__2018-25": 0.425, + "acc,exam_id__2013-12": 0.4125, + "acc,exam_id__2012-06": 0.4125, + "acc,exam_id__2013-10": 0.3, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6746063877642825, + "acc,all": 0.7203290246768508 + }, + "tweetsentbr": { + "f1_macro,all": 0.6536912715302309, + "acc,all": 0.6776119402985075, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 3, - "non_truncated": 14147, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 3, - "has_chat_template": true, - "chat_type": "user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "8f9e9c8dd0df4f6ce6fbd8f3eeda184bec3853dc", - "model_dtype": "torch.float16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1451.7455065359477, - "min_seq_length": 1428, - "max_seq_length": 1518, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1675.7455065359477, - "min_seq_length": 1652, - "max_seq_length": 1742, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 1, - "non_truncated": 718, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 1, - "mean_seq_length": 1744.9262865090404, - "min_seq_length": 1368, - "max_seq_length": 2545, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998609179415855 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1645.039188243527, - "min_seq_length": 1379, - "max_seq_length": 2643, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1691.9876923076922, - "min_seq_length": 1636, - "max_seq_length": 1812, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 3, + "non_truncated": 14147, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 3, + "has_chat_template": true, + "chat_type": "user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "8f9e9c8dd0df4f6ce6fbd8f3eeda184bec3853dc", + "model_dtype": "torch.float16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1462.3878571428572, - "min_seq_length": 1439, - "max_seq_length": 1713, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1390.764464692483, - "min_seq_length": 1124, - "max_seq_length": 1893, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1451.7455065359477, + "min_seq_length": 1428, + "max_seq_length": 1518, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1675.7455065359477, + "min_seq_length": 1652, + "max_seq_length": 1742, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 1, + "non_truncated": 718, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 1, + "mean_seq_length": 1744.9262865090404, + "min_seq_length": 1368, + "max_seq_length": 2545, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998609179415855 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1645.039188243527, + "min_seq_length": 1379, + "max_seq_length": 2643, + 
"max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1691.9876923076922, + "min_seq_length": 1636, + "max_seq_length": 1812, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1462.3878571428572, + "min_seq_length": 1439, + "max_seq_length": 1713, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1390.764464692483, + "min_seq_length": 1124, + "max_seq_length": 1893, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1963.3360752056403, + "min_seq_length": 1928, + "max_seq_length": 2002, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1881.2492537313433, + "min_seq_length": 1860, + "max_seq_length": 1976, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1963.3360752056403, - "min_seq_length": 1928, - "max_seq_length": 2002, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=rhaymison/Mistral-portuguese-luana-7b-Mathematics,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1881.2492537313433, - "min_seq_length": 1860, - "max_seq_length": 1976, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=rhaymison/Mistral-portuguese-luana-7b-Mathematics,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - 
null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "0e4d6ae" + "git_hash": "0e4d6ae" } \ No newline at end of file diff --git a/rhaymison/Mistral-portuguese-luana-7b-Mathematics/raw_2024-04-19T03-01-41.531579/results.json b/rhaymison/Mistral-portuguese-luana-7b-Mathematics/raw_2024-04-19T03-01-41.531579/results.json index 14ad9da50f02bc00b8f8707100c82908231cf5fc..78716b243e38465e94fa4fc40f150c84903fb230 100644 --- a/rhaymison/Mistral-portuguese-luana-7b-Mathematics/raw_2024-04-19T03-01-41.531579/results.json +++ b/rhaymison/Mistral-portuguese-luana-7b-Mathematics/raw_2024-04-19T03-01-41.531579/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.8906673114119923, - "acc,all": 0.8913398692810458, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7511980311776982, - "mse,all": 0.6868325980392157, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.4603616133518776, - "acc,exam_id__USP_2018": 0.37037037037037035, - "acc,exam_id__UNICAMP_2021_2": 0.45098039215686275, - "acc,exam_id__UNICAMP_2019": 0.48, - "acc,exam_id__UNICAMP_2020": 0.4727272727272727, - "acc,exam_id__UNICAMP_2024": 0.5333333333333333, - "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, - "acc,exam_id__USP_2019": 0.35, - "acc,exam_id__USP_2024": 0.7073170731707317, - "acc,exam_id__UNICAMP_2018": 0.3148148148148148, - "acc,exam_id__UNICAMP_2023": 0.4883720930232558, - "acc,exam_id__USP_2021": 0.40384615384615385, - "acc,exam_id__UNICAMP_2022": 0.6153846153846154, - "acc,exam_id__USP_2022": 0.40816326530612246, - "acc,exam_id__USP_2023": 0.5, - "acc,exam_id__USP_2020": 0.42857142857142855, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.56333100069979, - "acc,exam_id__2009": 0.5391304347826087, - "acc,exam_id__2010": 0.5470085470085471, - "acc,exam_id__2022": 0.518796992481203, - "acc,exam_id__2013": 0.5555555555555556, - "acc,exam_id__2014": 0.6238532110091743, - "acc,exam_id__2016": 0.5454545454545454, - "acc,exam_id__2016_2": 0.5365853658536586, - "acc,exam_id__2015": 0.5966386554621849, - "acc,exam_id__2023": 0.5925925925925926, - "acc,exam_id__2017": 0.5344827586206896, - "acc,exam_id__2012": 0.5172413793103449, - "acc,exam_id__2011": 0.6581196581196581 - }, - "faquad_nli": { - "f1_macro,all": 0.7346670193554287, - "acc,all": 0.8076923076923077, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7674448131094258, - "acc,all": 0.7757142857142857 - }, - "oab_exams": { - "acc,all": 0.3817767653758542, - "acc,exam_id__2011-03": 0.40404040404040403, - "acc,exam_id__2015-18": 0.325, - "acc,exam_id__2012-08": 0.325, - "acc,exam_id__2016-20": 0.3875, - "acc,exam_id__2014-14": 0.375, - "acc,exam_id__2014-15": 0.44871794871794873, - "acc,exam_id__2013-10": 0.2875, - "acc,exam_id__2016-20a": 0.3375, - "acc,exam_id__2018-25": 0.4375, - "acc,exam_id__2010-01": 0.3058823529411765, - "acc,exam_id__2017-24": 0.4125, - "acc,exam_id__2012-09": 0.33766233766233766, - "acc,exam_id__2016-19": 0.44871794871794873, - "acc,exam_id__2012-06a": 0.325, - "acc,exam_id__2017-23": 0.3875, - "acc,exam_id__2013-11": 0.4, - "acc,exam_id__2012-06": 0.425, - "acc,exam_id__2015-17": 0.4358974358974359, - "acc,exam_id__2014-13": 0.325, - "acc,exam_id__2011-05": 0.3875, - "acc,exam_id__2010-02": 0.44, - "acc,exam_id__2013-12": 0.425, - "acc,exam_id__2012-07": 0.375, - "acc,exam_id__2015-16": 0.4, - "acc,exam_id__2017-22": 0.475, - "acc,exam_id__2011-04": 0.325, - 
"acc,exam_id__2016-21": 0.3375, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6734744889754991, - "acc,all": 0.7168037602820212 - }, - "tweetsentbr": { - "f1_macro,all": 0.48796828044417695, - "acc,all": 0.6761194029850747, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.8906673114119923, + "acc,all": 0.8913398692810458, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7511980311776982, + "mse,all": 0.6868325980392157, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.4603616133518776, + "acc,exam_id__USP_2018": 0.37037037037037035, + "acc,exam_id__UNICAMP_2021_2": 0.45098039215686275, + "acc,exam_id__UNICAMP_2019": 0.48, + "acc,exam_id__UNICAMP_2020": 0.4727272727272727, + "acc,exam_id__UNICAMP_2024": 0.5333333333333333, + "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, + "acc,exam_id__USP_2019": 0.35, + "acc,exam_id__USP_2024": 0.7073170731707317, + "acc,exam_id__UNICAMP_2018": 0.3148148148148148, + "acc,exam_id__UNICAMP_2023": 0.4883720930232558, + "acc,exam_id__USP_2021": 0.40384615384615385, + "acc,exam_id__UNICAMP_2022": 0.6153846153846154, + "acc,exam_id__USP_2022": 0.40816326530612246, + "acc,exam_id__USP_2023": 0.5, + "acc,exam_id__USP_2020": 0.42857142857142855, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.56333100069979, + "acc,exam_id__2009": 0.5391304347826087, + "acc,exam_id__2010": 0.5470085470085471, + "acc,exam_id__2022": 0.518796992481203, + "acc,exam_id__2013": 0.5555555555555556, + "acc,exam_id__2014": 0.6238532110091743, + "acc,exam_id__2016": 0.5454545454545454, + "acc,exam_id__2016_2": 0.5365853658536586, + "acc,exam_id__2015": 0.5966386554621849, + "acc,exam_id__2023": 0.5925925925925926, + "acc,exam_id__2017": 0.5344827586206896, + "acc,exam_id__2012": 0.5172413793103449, + "acc,exam_id__2011": 0.6581196581196581 + }, + "faquad_nli": { + "f1_macro,all": 0.7346670193554287, + "acc,all": 0.8076923076923077, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7674448131094258, + "acc,all": 0.7757142857142857 + }, + "oab_exams": { + "acc,all": 
0.3817767653758542, + "acc,exam_id__2011-03": 0.40404040404040403, + "acc,exam_id__2015-18": 0.325, + "acc,exam_id__2012-08": 0.325, + "acc,exam_id__2016-20": 0.3875, + "acc,exam_id__2014-14": 0.375, + "acc,exam_id__2014-15": 0.44871794871794873, + "acc,exam_id__2013-10": 0.2875, + "acc,exam_id__2016-20a": 0.3375, + "acc,exam_id__2018-25": 0.4375, + "acc,exam_id__2010-01": 0.3058823529411765, + "acc,exam_id__2017-24": 0.4125, + "acc,exam_id__2012-09": 0.33766233766233766, + "acc,exam_id__2016-19": 0.44871794871794873, + "acc,exam_id__2012-06a": 0.325, + "acc,exam_id__2017-23": 0.3875, + "acc,exam_id__2013-11": 0.4, + "acc,exam_id__2012-06": 0.425, + "acc,exam_id__2015-17": 0.4358974358974359, + "acc,exam_id__2014-13": 0.325, + "acc,exam_id__2011-05": 0.3875, + "acc,exam_id__2010-02": 0.44, + "acc,exam_id__2013-12": 0.425, + "acc,exam_id__2012-07": 0.375, + "acc,exam_id__2015-16": 0.4, + "acc,exam_id__2017-22": 0.475, + "acc,exam_id__2011-04": 0.325, + "acc,exam_id__2016-21": 0.3375, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6734744889754991, + "acc,all": 0.7168037602820212 + }, + "tweetsentbr": { + "f1_macro,all": 0.6506243739255694, + "acc,all": 0.6761194029850747, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 3, - "non_truncated": 14147, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 3, - "has_chat_template": true, - "chat_type": "user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "2dadca915547d2a13c1e6162dc2c899056bd1053", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1451.7455065359477, - "min_seq_length": 1428, - "max_seq_length": 1518, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1675.7455065359477, - "min_seq_length": 1652, - "max_seq_length": 1742, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 1, - "non_truncated": 718, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 1, - "mean_seq_length": 1744.9262865090404, - "min_seq_length": 1368, - "max_seq_length": 2545, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998609179415855 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1645.039188243527, - "min_seq_length": 1379, - "max_seq_length": 2643, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1691.9876923076922, - "min_seq_length": 1636, - "max_seq_length": 1812, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 3, + "non_truncated": 14147, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 3, + "has_chat_template": true, + "chat_type": "user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "2dadca915547d2a13c1e6162dc2c899056bd1053", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1462.3878571428572, - "min_seq_length": 1439, - "max_seq_length": 1713, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1390.764464692483, - "min_seq_length": 1124, - "max_seq_length": 1893, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1451.7455065359477, + "min_seq_length": 1428, + "max_seq_length": 1518, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1675.7455065359477, + "min_seq_length": 1652, + "max_seq_length": 1742, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 1, + "non_truncated": 718, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 1, + "mean_seq_length": 1744.9262865090404, + "min_seq_length": 1368, + "max_seq_length": 2545, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998609179415855 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1645.039188243527, + "min_seq_length": 1379, + "max_seq_length": 2643, + 
"max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1691.9876923076922, + "min_seq_length": 1636, + "max_seq_length": 1812, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1462.3878571428572, + "min_seq_length": 1439, + "max_seq_length": 1713, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1390.764464692483, + "min_seq_length": 1124, + "max_seq_length": 1893, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1963.3360752056403, + "min_seq_length": 1928, + "max_seq_length": 2002, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1881.2492537313433, + "min_seq_length": 1860, + "max_seq_length": 1976, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1963.3360752056403, - "min_seq_length": 1928, - "max_seq_length": 2002, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=rhaymison/Mistral-portuguese-luana-7b-Mathematics,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1881.2492537313433, - "min_seq_length": 1860, - "max_seq_length": 1976, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=rhaymison/Mistral-portuguese-luana-7b-Mathematics,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - 
null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "0e4d6ae" + "git_hash": "0e4d6ae" } \ No newline at end of file diff --git a/rhaymison/Mistral-portuguese-luana-7b-Mathematics/results_2024-04-18T00-45-28.065413.json b/rhaymison/Mistral-portuguese-luana-7b-Mathematics/results_2024-04-18T00-45-28.065413.json index 00028f286b89cff99e650eac804411095294b6a4..cb9e45f36d4bbef780689c228bbcbad75c49e87a 100644 --- a/rhaymison/Mistral-portuguese-luana-7b-Mathematics/results_2024-04-18T00-45-28.065413.json +++ b/rhaymison/Mistral-portuguese-luana-7b-Mathematics/results_2024-04-18T00-45-28.065413.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6359743981375985, - "all_grouped_npm": 0.4611467468251455, + "all_grouped_average": 0.6541324890134381, + "all_grouped_npm": 0.4881677153903832, "all_grouped": { "enem_challenge": 0.5668299510146956, "bluex": 0.45897079276773295, @@ -45,7 +45,7 @@ "faquad_nli": 0.7486666666666666, "hatebr_offensive": 0.763946859411585, "portuguese_hate_speech": 0.6746063877642825, - "tweetsentbr": 0.4902684536476732 + "tweetsentbr": 0.6536912715302309 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.5668299510146956, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7486666666666666, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.763946859411585, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6746063877642825, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4902684536476732 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6536912715302309 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.5668299510146956, @@ -150,9 +150,9 @@ "main_score": 0.6746063877642825 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4902684536476732, + "f1_macro,all": 0.6536912715302309, "acc,all": 0.6776119402985075, - "main_score": 0.4902684536476732 + "main_score": 0.6536912715302309 } }, "config_tasks": { diff --git a/rhaymison/Mistral-portuguese-luana-7b-Mathematics/results_2024-04-19T03-01-41.531579.json b/rhaymison/Mistral-portuguese-luana-7b-Mathematics/results_2024-04-19T03-01-41.531579.json index bdfe113911ccfe339dadf212c08b53b57ec87c6d..871b62ad717a71c26ba6d059d83a5d226fe9ccda 100644 --- a/rhaymison/Mistral-portuguese-luana-7b-Mathematics/results_2024-04-19T03-01-41.531579.json +++ b/rhaymison/Mistral-portuguese-luana-7b-Mathematics/results_2024-04-19T03-01-41.531579.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6345432582113048, - "all_grouped_npm": 0.458283994205074, + "all_grouped_average": 0.6526161574870151, + "all_grouped_npm": 0.4851781895558333, "all_grouped": { "enem_challenge": 0.56333100069979, "bluex": 0.4603616133518776, @@ -45,7 +45,7 @@ "faquad_nli": 0.7346670193554287, "hatebr_offensive": 0.7674448131094258, "portuguese_hate_speech": 0.6734744889754991, - "tweetsentbr": 0.48796828044417695 + "tweetsentbr": 0.6506243739255694 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.56333100069979, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7346670193554287, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7674448131094258, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6734744889754991, - "harness|tweetsentbr|tweetsentbr|None|25": 0.48796828044417695 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6506243739255694 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.56333100069979, @@ -150,9 +150,9 @@ "main_score": 
0.6734744889754991 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.48796828044417695, + "f1_macro,all": 0.6506243739255694, "acc,all": 0.6761194029850747, - "main_score": 0.48796828044417695 + "main_score": 0.6506243739255694 } }, "config_tasks": { diff --git a/rhaymison/gemma-portuguese-2b-it/raw_2024-04-11T20-12-36.339974/results.json b/rhaymison/gemma-portuguese-2b-it/raw_2024-04-11T20-12-36.339974/results.json index c9d79ac2e512f301a4c6d15ee1b56032e3d69e9e..df171219e441961b7851d510c8dbbcc19394b052 100644 --- a/rhaymison/gemma-portuguese-2b-it/raw_2024-04-11T20-12-36.339974/results.json +++ b/rhaymison/gemma-portuguese-2b-it/raw_2024-04-11T20-12-36.339974/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.18056089127929312, - "acc,all": 0.28799019607843135, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0, - "mse,all": 3.062949346405229, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.0, - "acc,exam_id__UNICAMP_2020": 0.0, - "acc,exam_id__UNICAMP_2023": 0.0, - "acc,exam_id__USP_2020": 0.0, - "acc,exam_id__UNICAMP_2021_1": 0.0, - "acc,exam_id__UNICAMP_2018": 0.0, - "acc,exam_id__USP_2022": 0.0, - "acc,exam_id__USP_2024": 0.0, - "acc,exam_id__UNICAMP_2022": 0.0, - "acc,exam_id__UNICAMP_2024": 0.0, - "acc,exam_id__USP_2021": 0.0, - "acc,exam_id__USP_2018": 0.0, - "acc,exam_id__UNICAMP_2019": 0.0, - "acc,exam_id__UNICAMP_2021_2": 0.0, - "acc,exam_id__USP_2019": 0.0, - "acc,exam_id__USP_2023": 0.0, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.0, - "acc,exam_id__2009": 0.0, - "acc,exam_id__2016_2": 0.0, - "acc,exam_id__2017": 0.0, - "acc,exam_id__2010": 0.0, - "acc,exam_id__2014": 0.0, - "acc,exam_id__2023": 0.0, - "acc,exam_id__2016": 0.0, - "acc,exam_id__2015": 0.0, - "acc,exam_id__2013": 0.0, - "acc,exam_id__2012": 0.0, - "acc,exam_id__2011": 0.0, - "acc,exam_id__2022": 0.0 - }, - "faquad_nli": { - "f1_macro,all": 0.01773049645390071, - "acc,all": 0.007692307692307693, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.18019226469930694, - "acc,all": 0.28785714285714287 - }, - "oab_exams": { - "acc,all": 0.0, - "acc,exam_id__2012-09": 0.0, - "acc,exam_id__2012-06": 0.0, - "acc,exam_id__2012-07": 0.0, - "acc,exam_id__2010-02": 0.0, - "acc,exam_id__2013-11": 0.0, - "acc,exam_id__2016-21": 0.0, - "acc,exam_id__2017-22": 0.0, - "acc,exam_id__2016-20a": 0.0, - "acc,exam_id__2013-10": 0.0, - "acc,exam_id__2011-03": 0.0, - "acc,exam_id__2017-23": 0.0, - "acc,exam_id__2011-04": 0.0, - "acc,exam_id__2016-19": 0.0, - "acc,exam_id__2016-20": 0.0, - "acc,exam_id__2017-24": 0.0, - "acc,exam_id__2014-13": 0.0, - "acc,exam_id__2011-05": 0.0, - "acc,exam_id__2014-14": 0.0, - "acc,exam_id__2013-12": 0.0, - "acc,exam_id__2012-08": 0.0, - "acc,exam_id__2014-15": 0.0, - "acc,exam_id__2015-17": 0.0, - "acc,exam_id__2012-06a": 0.0, - "acc,exam_id__2015-18": 0.0, - "acc,exam_id__2010-01": 0.0, - "acc,exam_id__2015-16": 0.0, - "acc,exam_id__2018-25": 0.0, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.0025940337224383916, - "acc,all": 0.0011750881316098707 - }, - "tweetsentbr": { - "f1_macro,all": 0.0, - "acc,all": 0.0, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: 
{{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.2708413369189397, + "acc,all": 0.28799019607843135, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0, + "mse,all": 3.062949346405229, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.0, + "acc,exam_id__UNICAMP_2020": 0.0, + "acc,exam_id__UNICAMP_2023": 0.0, + "acc,exam_id__USP_2020": 0.0, + "acc,exam_id__UNICAMP_2021_1": 0.0, + "acc,exam_id__UNICAMP_2018": 0.0, + "acc,exam_id__USP_2022": 0.0, + "acc,exam_id__USP_2024": 0.0, + "acc,exam_id__UNICAMP_2022": 0.0, + "acc,exam_id__UNICAMP_2024": 0.0, + "acc,exam_id__USP_2021": 0.0, + "acc,exam_id__USP_2018": 0.0, + "acc,exam_id__UNICAMP_2019": 0.0, + "acc,exam_id__UNICAMP_2021_2": 0.0, + "acc,exam_id__USP_2019": 0.0, + "acc,exam_id__USP_2023": 0.0, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.0, + "acc,exam_id__2009": 0.0, + "acc,exam_id__2016_2": 0.0, + "acc,exam_id__2017": 0.0, + "acc,exam_id__2010": 0.0, + "acc,exam_id__2014": 0.0, + "acc,exam_id__2023": 0.0, + "acc,exam_id__2016": 0.0, + "acc,exam_id__2015": 0.0, + "acc,exam_id__2013": 0.0, + "acc,exam_id__2012": 0.0, + "acc,exam_id__2011": 0.0, + "acc,exam_id__2022": 0.0 + }, + "faquad_nli": { + "f1_macro,all": 0.02659574468085106, + "acc,all": 0.007692307692307693, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.2702883970489604, + "acc,all": 0.28785714285714287 + }, + "oab_exams": { + "acc,all": 0.0, + "acc,exam_id__2012-09": 0.0, + "acc,exam_id__2012-06": 0.0, + "acc,exam_id__2012-07": 0.0, + "acc,exam_id__2010-02": 0.0, + "acc,exam_id__2013-11": 0.0, + "acc,exam_id__2016-21": 0.0, + "acc,exam_id__2017-22": 0.0, + "acc,exam_id__2016-20a": 0.0, + "acc,exam_id__2013-10": 0.0, + "acc,exam_id__2011-03": 0.0, + "acc,exam_id__2017-23": 0.0, + "acc,exam_id__2011-04": 0.0, + "acc,exam_id__2016-19": 0.0, + "acc,exam_id__2016-20": 0.0, + "acc,exam_id__2017-24": 0.0, + "acc,exam_id__2014-13": 0.0, + "acc,exam_id__2011-05": 0.0, + "acc,exam_id__2014-14": 0.0, + "acc,exam_id__2013-12": 0.0, + "acc,exam_id__2012-08": 0.0, + "acc,exam_id__2014-15": 0.0, + "acc,exam_id__2015-17": 0.0, + "acc,exam_id__2012-06a": 0.0, + "acc,exam_id__2015-18": 0.0, + "acc,exam_id__2010-01": 0.0, + "acc,exam_id__2015-16": 0.0, + "acc,exam_id__2018-25": 0.0, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 
0.0038910505836575876, + "acc,all": 0.0011750881316098707 + }, + "tweetsentbr": { + "f1_macro,all": 0.0, + "acc,all": 0.0, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"2e73faabdf0eea30ebd32fb44455f855722fa640", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 5012344832, - "model_num_parameters": 2506172416, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 1, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1151.8839869281046, - "min_seq_length": 1137, - "max_seq_length": 1194, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1345.8839869281046, - "min_seq_length": 1331, - "max_seq_length": 1388, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1408.4464534075105, - "min_seq_length": 1115, - "max_seq_length": 2028, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1250.8278516445066, - "min_seq_length": 1054, - "max_seq_length": 2097, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1237.1338461538462, - "min_seq_length": 1199, - "max_seq_length": 1310, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1129.505, - "min_seq_length": 1114, - "max_seq_length": 1318, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "2e73faabdf0eea30ebd32fb44455f855722fa640", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 5012344832, + "model_num_parameters": 2506172416, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + 
"model_device": "cuda:0", + "batch_size": 1, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1022.4004555808656, - "min_seq_length": 826, - "max_seq_length": 1357, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1151.8839869281046, + "min_seq_length": 1137, + "max_seq_length": 1194, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1345.8839869281046, + "min_seq_length": 1331, + "max_seq_length": 1388, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1408.4464534075105, + "min_seq_length": 1115, + "max_seq_length": 2028, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1250.8278516445066, + "min_seq_length": 1054, + "max_seq_length": 2097, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1237.1338461538462, + "min_seq_length": 1199, + "max_seq_length": 1310, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1129.505, + "min_seq_length": 1114, + "max_seq_length": 1318, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1022.4004555808656, + "min_seq_length": 826, + "max_seq_length": 1357, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1505.13866039953, + "min_seq_length": 1477, + "max_seq_length": 1536, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 
0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1334.9791044776118, + "min_seq_length": 1320, + "max_seq_length": 1377, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1505.13866039953, - "min_seq_length": 1477, - "max_seq_length": 1536, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=rhaymison/gemma-portuguese-2b-it,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1334.9791044776118, - "min_seq_length": 1320, - "max_seq_length": 1377, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=rhaymison/gemma-portuguese-2b-it,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": null + "git_hash": null } \ No newline at end of file diff --git a/rhaymison/gemma-portuguese-2b-it/results_2024-04-11T20-12-36.339974.json b/rhaymison/gemma-portuguese-2b-it/results_2024-04-11T20-12-36.339974.json index 0f0c747ac232d8c7a8a900519afb813b53143f2f..e31491c04efcb636574f86ea30f32f076656e7c2 100644 --- a/rhaymison/gemma-portuguese-2b-it/results_2024-04-11T20-12-36.339974.json +++ b/rhaymison/gemma-portuguese-2b-it/results_2024-04-11T20-12-36.339974.json @@ -34,28 +34,28 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.042341965128326574, - "all_grouped_npm": -0.4844771283258766, + "all_grouped_average": 0.06351294769248986, + "all_grouped_npm": -0.4423061234478991, "all_grouped": { "enem_challenge": 0.0, "bluex": 0.0, "oab_exams": 0.0, - "assin2_rte": 0.18056089127929312, + "assin2_rte": 0.2708413369189397, "assin2_sts": 0.0, - "faquad_nli": 0.01773049645390071, - "hatebr_offensive": 0.18019226469930694, - "portuguese_hate_speech": 0.0025940337224383916, + "faquad_nli": 0.02659574468085106, + "hatebr_offensive": 0.2702883970489604, + "portuguese_hate_speech": 0.0038910505836575876, "tweetsentbr": 0.0 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.0, "harness|bluex|bluex|None|3": 0.0, "harness|oab_exams|oab_exams|None|3": 0.0, - "harness|assin2_rte|assin2_rte|None|15": 0.18056089127929312, + "harness|assin2_rte|assin2_rte|None|15": 0.2708413369189397, "harness|assin2_sts|assin2_sts|None|15": 0, - "harness|faquad_nli|faquad_nli|None|15": 0.01773049645390071, - "harness|hatebr_offensive|hatebr_offensive|None|25": 0.18019226469930694, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 
0.0025940337224383916, + "harness|faquad_nli|faquad_nli|None|15": 0.02659574468085106, + "harness|hatebr_offensive|hatebr_offensive|None|25": 0.2702883970489604, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.0038910505836575876, "harness|tweetsentbr|tweetsentbr|None|25": 0.0 }, "harness|enem_challenge|enem_challenge|None|3": { @@ -125,9 +125,9 @@ "main_score": 0.0 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.18056089127929312, + "f1_macro,all": 0.2708413369189397, "acc,all": 0.28799019607843135, - "main_score": 0.18056089127929312 + "main_score": 0.2708413369189397 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0, @@ -135,19 +135,19 @@ "main_score": 0 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.01773049645390071, + "f1_macro,all": 0.02659574468085106, "acc,all": 0.007692307692307693, - "main_score": 0.01773049645390071 + "main_score": 0.02659574468085106 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { - "f1_macro,all": 0.18019226469930694, + "f1_macro,all": 0.2702883970489604, "acc,all": 0.28785714285714287, - "main_score": 0.18019226469930694 + "main_score": 0.2702883970489604 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.0025940337224383916, + "f1_macro,all": 0.0038910505836575876, "acc,all": 0.0011750881316098707, - "main_score": 0.0025940337224383916 + "main_score": 0.0038910505836575876 }, "harness|tweetsentbr|tweetsentbr|None|25": { "f1_macro,all": 0.0, diff --git a/rhaymison/gemma-portuguese-tom-cat-2b-it/raw_2024-04-30T13-01-42.167960/results.json b/rhaymison/gemma-portuguese-tom-cat-2b-it/raw_2024-04-30T13-01-42.167960/results.json index 7eff5a9f19adcffe22b05c6b5463f08c42d85ffd..993511f97880880cec65cfd8e7496e7d44918d95 100644 --- a/rhaymison/gemma-portuguese-tom-cat-2b-it/raw_2024-04-30T13-01-42.167960/results.json +++ b/rhaymison/gemma-portuguese-tom-cat-2b-it/raw_2024-04-30T13-01-42.167960/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.46844825942608453, - "acc,all": 0.7099673202614379, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.14057451236845117, - "mse,all": 2.5026424055826797, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.2906815020862309, - "acc,exam_id__UNICAMP_2024": 0.4, - "acc,exam_id__USP_2020": 0.30357142857142855, - "acc,exam_id__UNICAMP_2023": 0.32558139534883723, - "acc,exam_id__UNICAMP_2020": 0.2727272727272727, - "acc,exam_id__USP_2024": 0.2682926829268293, - "acc,exam_id__UNICAMP_2019": 0.2, - "acc,exam_id__UNICAMP_2021_1": 0.32608695652173914, - "acc,exam_id__UNICAMP_2022": 0.38461538461538464, - "acc,exam_id__USP_2021": 0.19230769230769232, - "acc,exam_id__USP_2023": 0.3409090909090909, - "acc,exam_id__UNICAMP_2021_2": 0.37254901960784315, - "acc,exam_id__USP_2022": 0.2653061224489796, - "acc,exam_id__USP_2019": 0.25, - "acc,exam_id__UNICAMP_2018": 0.2962962962962963, - "acc,exam_id__USP_2018": 0.2037037037037037, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.27711686494051785, - "acc,exam_id__2016_2": 0.3170731707317073, - "acc,exam_id__2013": 0.3333333333333333, - "acc,exam_id__2010": 0.26495726495726496, - "acc,exam_id__2015": 0.2689075630252101, - "acc,exam_id__2014": 0.24770642201834864, - "acc,exam_id__2009": 0.17391304347826086, - "acc,exam_id__2011": 0.2564102564102564, - "acc,exam_id__2016": 0.30578512396694213, - "acc,exam_id__2023": 0.34074074074074073, - "acc,exam_id__2017": 0.25862068965517243, - 
"acc,exam_id__2022": 0.3007518796992481, - "acc,exam_id__2012": 0.2413793103448276 - }, - "faquad_nli": { - "f1_macro,all": 0.2939268008165646, - "acc,all": 0.7646153846153846, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.4659385571754847, - "acc,all": 0.5557142857142857 - }, - "oab_exams": { - "acc,all": 0.2797266514806378, - "acc,exam_id__2015-17": 0.2948717948717949, - "acc,exam_id__2017-23": 0.325, - "acc,exam_id__2013-10": 0.225, - "acc,exam_id__2016-20a": 0.2375, - "acc,exam_id__2014-13": 0.2625, - "acc,exam_id__2010-01": 0.25882352941176473, - "acc,exam_id__2011-04": 0.25, - "acc,exam_id__2010-02": 0.28, - "acc,exam_id__2016-20": 0.275, - "acc,exam_id__2012-06a": 0.275, - "acc,exam_id__2012-07": 0.3, - "acc,exam_id__2012-08": 0.3125, - "acc,exam_id__2014-15": 0.34615384615384615, - "acc,exam_id__2011-03": 0.30303030303030304, - "acc,exam_id__2015-18": 0.3, - "acc,exam_id__2011-05": 0.2625, - "acc,exam_id__2012-09": 0.2857142857142857, - "acc,exam_id__2012-06": 0.225, - "acc,exam_id__2015-16": 0.3125, - "acc,exam_id__2017-24": 0.2375, - "acc,exam_id__2013-12": 0.3125, - "acc,exam_id__2016-19": 0.28205128205128205, - "acc,exam_id__2016-21": 0.25, - "acc,exam_id__2013-11": 0.2875, - "acc,exam_id__2014-14": 0.2375, - "acc,exam_id__2017-22": 0.275, - "acc,exam_id__2018-25": 0.3375, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.4535719463949129, - "acc,all": 0.4535840188014101 - }, - "tweetsentbr": { - "f1_macro,all": 0.18857479622284484, - "acc,all": 0.4616915422885572, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.7026723891391267, + "acc,all": 0.7099673202614379, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.14057451236845117, + "mse,all": 2.5026424055826797, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.2906815020862309, + "acc,exam_id__UNICAMP_2024": 0.4, + "acc,exam_id__USP_2020": 0.30357142857142855, + "acc,exam_id__UNICAMP_2023": 0.32558139534883723, + "acc,exam_id__UNICAMP_2020": 0.2727272727272727, + "acc,exam_id__USP_2024": 0.2682926829268293, + "acc,exam_id__UNICAMP_2019": 0.2, + "acc,exam_id__UNICAMP_2021_1": 0.32608695652173914, + "acc,exam_id__UNICAMP_2022": 0.38461538461538464, + "acc,exam_id__USP_2021": 0.19230769230769232, + "acc,exam_id__USP_2023": 0.3409090909090909, + "acc,exam_id__UNICAMP_2021_2": 0.37254901960784315, + "acc,exam_id__USP_2022": 0.2653061224489796, + "acc,exam_id__USP_2019": 0.25, + "acc,exam_id__UNICAMP_2018": 0.2962962962962963, + "acc,exam_id__USP_2018": 0.2037037037037037, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.27711686494051785, + "acc,exam_id__2016_2": 0.3170731707317073, + "acc,exam_id__2013": 0.3333333333333333, + "acc,exam_id__2010": 0.26495726495726496, + "acc,exam_id__2015": 0.2689075630252101, + "acc,exam_id__2014": 0.24770642201834864, + "acc,exam_id__2009": 0.17391304347826086, + "acc,exam_id__2011": 0.2564102564102564, + "acc,exam_id__2016": 0.30578512396694213, + "acc,exam_id__2023": 0.34074074074074073, + "acc,exam_id__2017": 0.25862068965517243, + "acc,exam_id__2022": 0.3007518796992481, + "acc,exam_id__2012": 0.2413793103448276 + }, + "faquad_nli": { + "f1_macro,all": 0.4408902012248469, + "acc,all": 0.7646153846153846, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.4659385571754847, + "acc,all": 0.5557142857142857 + }, + "oab_exams": { + "acc,all": 0.2797266514806378, + "acc,exam_id__2015-17": 0.2948717948717949, + "acc,exam_id__2017-23": 0.325, + "acc,exam_id__2013-10": 0.225, + "acc,exam_id__2016-20a": 0.2375, + "acc,exam_id__2014-13": 0.2625, + "acc,exam_id__2010-01": 0.25882352941176473, + "acc,exam_id__2011-04": 0.25, + "acc,exam_id__2010-02": 0.28, + "acc,exam_id__2016-20": 0.275, + "acc,exam_id__2012-06a": 0.275, + "acc,exam_id__2012-07": 0.3, + "acc,exam_id__2012-08": 0.3125, + "acc,exam_id__2014-15": 0.34615384615384615, + "acc,exam_id__2011-03": 0.30303030303030304, + "acc,exam_id__2015-18": 0.3, + "acc,exam_id__2011-05": 0.2625, + "acc,exam_id__2012-09": 0.2857142857142857, + "acc,exam_id__2012-06": 0.225, + "acc,exam_id__2015-16": 0.3125, + "acc,exam_id__2017-24": 0.2375, + "acc,exam_id__2013-12": 0.3125, + 
"acc,exam_id__2016-19": 0.28205128205128205, + "acc,exam_id__2016-21": 0.25, + "acc,exam_id__2013-11": 0.2875, + "acc,exam_id__2014-14": 0.2375, + "acc,exam_id__2017-22": 0.275, + "acc,exam_id__2018-25": 0.3375, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.4535719463949129, + "acc,all": 0.4535840188014101 + }, + "tweetsentbr": { + "f1_macro,all": 0.2514330616304598, + "acc,all": 0.4616915422885572, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "e5c1d7bec299b8214888b6442dbb333115873e2e", - "model_dtype": "torch.float16", - "model_memory_footprint": 5012344832, - "model_num_parameters": 2506172416, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 2, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1151.8839869281046, - "min_seq_length": 1137, - "max_seq_length": 1194, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1345.8839869281046, - "min_seq_length": 1331, - "max_seq_length": 1388, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1408.4464534075105, - "min_seq_length": 1115, - "max_seq_length": 2028, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1250.8278516445066, - "min_seq_length": 1054, - "max_seq_length": 2097, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1237.1338461538462, - "min_seq_length": 1199, - "max_seq_length": 1310, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "e5c1d7bec299b8214888b6442dbb333115873e2e", + "model_dtype": "torch.float16", + "model_memory_footprint": 5012344832, + "model_num_parameters": 2506172416, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 2, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1129.505, - "min_seq_length": 1114, - "max_seq_length": 1318, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1022.4004555808656, - "min_seq_length": 826, - "max_seq_length": 1357, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1151.8839869281046, + "min_seq_length": 1137, + "max_seq_length": 1194, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1345.8839869281046, + "min_seq_length": 1331, + "max_seq_length": 1388, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1408.4464534075105, + "min_seq_length": 1115, + "max_seq_length": 2028, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1250.8278516445066, + "min_seq_length": 1054, + "max_seq_length": 2097, + "max_ctx_length": 2528, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1237.1338461538462, + "min_seq_length": 1199, + "max_seq_length": 1310, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1129.505, + "min_seq_length": 1114, + "max_seq_length": 1318, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1022.4004555808656, + "min_seq_length": 826, + "max_seq_length": 1357, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1505.13866039953, + "min_seq_length": 1477, + "max_seq_length": 1536, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1334.9791044776118, + "min_seq_length": 1320, + "max_seq_length": 1377, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1505.13866039953, - "min_seq_length": 1477, - "max_seq_length": 1536, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=rhaymison/gemma-portuguese-tom-cat-2b-it,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1334.9791044776118, - "min_seq_length": 1320, - "max_seq_length": 1377, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=rhaymison/gemma-portuguese-tom-cat-2b-it,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "51e0e5e" + 
"git_hash": "51e0e5e" } \ No newline at end of file diff --git a/rhaymison/gemma-portuguese-tom-cat-2b-it/results_2024-04-30T13-01-42.167960.json b/rhaymison/gemma-portuguese-tom-cat-2b-it/results_2024-04-30T13-01-42.167960.json index afe54c9994eb078d224f11efdc4dd55ebd363084..f473a96b44a5e37c868e3c44eba7ae649ac9595e 100644 --- a/rhaymison/gemma-portuguese-tom-cat-2b-it/results_2024-04-30T13-01-42.167960.json +++ b/rhaymison/gemma-portuguese-tom-cat-2b-it/results_2024-04-30T13-01-42.167960.json @@ -34,29 +34,29 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.31761776565685884, - "all_grouped_npm": -0.03600920159743054, + "all_grouped_average": 0.3669561873822966, + "all_grouped_npm": 0.05645087111680623, "all_grouped": { "enem_challenge": 0.27711686494051785, "bluex": 0.2906815020862309, "oab_exams": 0.2797266514806378, - "assin2_rte": 0.46844825942608453, + "assin2_rte": 0.7026723891391267, "assin2_sts": 0.14057451236845117, - "faquad_nli": 0.2939268008165646, + "faquad_nli": 0.4408902012248469, "hatebr_offensive": 0.4659385571754847, "portuguese_hate_speech": 0.4535719463949129, - "tweetsentbr": 0.18857479622284484 + "tweetsentbr": 0.2514330616304598 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.27711686494051785, "harness|bluex|bluex|None|3": 0.2906815020862309, "harness|oab_exams|oab_exams|None|3": 0.2797266514806378, - "harness|assin2_rte|assin2_rte|None|15": 0.46844825942608453, + "harness|assin2_rte|assin2_rte|None|15": 0.7026723891391267, "harness|assin2_sts|assin2_sts|None|15": 0.14057451236845117, - "harness|faquad_nli|faquad_nli|None|15": 0.2939268008165646, + "harness|faquad_nli|faquad_nli|None|15": 0.4408902012248469, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.4659385571754847, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.4535719463949129, - "harness|tweetsentbr|tweetsentbr|None|25": 0.18857479622284484 + "harness|tweetsentbr|tweetsentbr|None|25": 0.2514330616304598 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.27711686494051785, @@ -125,9 +125,9 @@ "main_score": 0.2797266514806378 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.46844825942608453, + "f1_macro,all": 0.7026723891391267, "acc,all": 0.7099673202614379, - "main_score": 0.46844825942608453 + "main_score": 0.7026723891391267 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.14057451236845117, @@ -135,9 +135,9 @@ "main_score": 0.14057451236845117 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.2939268008165646, + "f1_macro,all": 0.4408902012248469, "acc,all": 0.7646153846153846, - "main_score": 0.2939268008165646 + "main_score": 0.4408902012248469 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { "f1_macro,all": 0.4659385571754847, @@ -150,9 +150,9 @@ "main_score": 0.4535719463949129 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.18857479622284484, + "f1_macro,all": 0.2514330616304598, "acc,all": 0.4616915422885572, - "main_score": 0.18857479622284484 + "main_score": 0.2514330616304598 } }, "config_tasks": { diff --git a/rishiraj/CatPPT-base/raw_2024-02-26T18-41-56.705669/results.json b/rishiraj/CatPPT-base/raw_2024-02-26T18-41-56.705669/results.json index 7a5fb3a90bd41c891742a583bfe21b1281ab3bd8..986555a1a49fb2d5eb6f4b6591c84714989fdde9 100644 --- a/rishiraj/CatPPT-base/raw_2024-02-26T18-41-56.705669/results.json +++ b/rishiraj/CatPPT-base/raw_2024-02-26T18-41-56.705669/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - 
"f1_macro,all": 0.9235804933406541, - "acc,all": 0.9236111111111112, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7784430066746557, - "mse,all": 0.4820465686274509, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5591098748261474, - "acc,exam_id__USP_2024": 0.7804878048780488, - "acc,exam_id__USP_2020": 0.5892857142857143, - "acc,exam_id__USP_2018": 0.48148148148148145, - "acc,exam_id__UNICAMP_2018": 0.5370370370370371, - "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, - "acc,exam_id__USP_2023": 0.6363636363636364, - "acc,exam_id__UNICAMP_2022": 0.6923076923076923, - "acc,exam_id__UNICAMP_2019": 0.56, - "acc,exam_id__UNICAMP_2024": 0.5555555555555556, - "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373, - "acc,exam_id__UNICAMP_2020": 0.5454545454545454, - "acc,exam_id__USP_2022": 0.46938775510204084, - "acc,exam_id__USP_2021": 0.5192307692307693, - "acc,exam_id__UNICAMP_2023": 0.5348837209302325, - "acc,exam_id__USP_2019": 0.45, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6536039188243526, - "acc,exam_id__2011": 0.717948717948718, - "acc,exam_id__2016": 0.5950413223140496, - "acc,exam_id__2012": 0.6551724137931034, - "acc,exam_id__2015": 0.6218487394957983, - "acc,exam_id__2010": 0.6495726495726496, - "acc,exam_id__2023": 0.7185185185185186, - "acc,exam_id__2017": 0.6637931034482759, - "acc,exam_id__2013": 0.6481481481481481, - "acc,exam_id__2014": 0.6422018348623854, - "acc,exam_id__2009": 0.6173913043478261, - "acc,exam_id__2016_2": 0.6666666666666666, - "acc,exam_id__2022": 0.6390977443609023 - }, - "faquad_nli": { - "f1_macro,all": 0.769428955491387, - "acc,all": 0.8246153846153846, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8583629779928524, - "acc,all": 0.8592857142857143 - }, - "oab_exams": { - "acc,all": 0.4296127562642369, - "acc,exam_id__2012-07": 0.375, - "acc,exam_id__2012-09": 0.3246753246753247, - "acc,exam_id__2015-16": 0.375, - "acc,exam_id__2011-03": 0.3838383838383838, - "acc,exam_id__2010-02": 0.48, - "acc,exam_id__2012-08": 0.375, - "acc,exam_id__2012-06a": 0.45, - "acc,exam_id__2014-15": 0.5128205128205128, - "acc,exam_id__2017-22": 0.5125, - "acc,exam_id__2016-19": 0.48717948717948717, - "acc,exam_id__2010-01": 0.4117647058823529, - "acc,exam_id__2016-20a": 0.4375, - "acc,exam_id__2014-14": 0.4875, - "acc,exam_id__2014-13": 0.375, - "acc,exam_id__2016-20": 0.4125, - "acc,exam_id__2015-17": 0.5384615384615384, - "acc,exam_id__2011-05": 0.475, - "acc,exam_id__2013-11": 0.4625, - "acc,exam_id__2012-06": 0.475, - "acc,exam_id__2013-12": 0.3875, - "acc,exam_id__2011-04": 0.4125, - "acc,exam_id__2013-10": 0.425, - "acc,exam_id__2017-24": 0.3625, - "acc,exam_id__2016-21": 0.3625, - "acc,exam_id__2017-23": 0.4625, - "acc,exam_id__2015-18": 0.4125, - "acc,exam_id__2018-25": 0.425, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6725854591241898, - "acc,all": 0.6897767332549941 - }, - "tweetsentbr": { - "f1_macro,all": 0.46780307205929983, - "acc,all": 0.6965174129353234, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? 
Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9235804933406541, + "acc,all": 0.9236111111111112, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7784430066746557, + "mse,all": 0.4820465686274509, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5591098748261474, + "acc,exam_id__USP_2024": 0.7804878048780488, + "acc,exam_id__USP_2020": 0.5892857142857143, + "acc,exam_id__USP_2018": 0.48148148148148145, + "acc,exam_id__UNICAMP_2018": 0.5370370370370371, + "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, + "acc,exam_id__USP_2023": 0.6363636363636364, + "acc,exam_id__UNICAMP_2022": 0.6923076923076923, + "acc,exam_id__UNICAMP_2019": 0.56, + "acc,exam_id__UNICAMP_2024": 0.5555555555555556, + "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373, + "acc,exam_id__UNICAMP_2020": 0.5454545454545454, + "acc,exam_id__USP_2022": 0.46938775510204084, + "acc,exam_id__USP_2021": 0.5192307692307693, + "acc,exam_id__UNICAMP_2023": 0.5348837209302325, + "acc,exam_id__USP_2019": 0.45, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6536039188243526, + "acc,exam_id__2011": 0.717948717948718, + "acc,exam_id__2016": 0.5950413223140496, + "acc,exam_id__2012": 0.6551724137931034, + "acc,exam_id__2015": 0.6218487394957983, + "acc,exam_id__2010": 0.6495726495726496, + "acc,exam_id__2023": 0.7185185185185186, + "acc,exam_id__2017": 0.6637931034482759, + "acc,exam_id__2013": 0.6481481481481481, + "acc,exam_id__2014": 0.6422018348623854, + "acc,exam_id__2009": 0.6173913043478261, + "acc,exam_id__2016_2": 0.6666666666666666, + "acc,exam_id__2022": 0.6390977443609023 + }, + "faquad_nli": { + "f1_macro,all": 0.769428955491387, + "acc,all": 0.8246153846153846, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8583629779928524, + "acc,all": 0.8592857142857143 + }, + "oab_exams": { + "acc,all": 0.4296127562642369, + "acc,exam_id__2012-07": 0.375, + "acc,exam_id__2012-09": 0.3246753246753247, + "acc,exam_id__2015-16": 0.375, + "acc,exam_id__2011-03": 0.3838383838383838, + "acc,exam_id__2010-02": 0.48, + "acc,exam_id__2012-08": 0.375, + "acc,exam_id__2012-06a": 0.45, + "acc,exam_id__2014-15": 0.5128205128205128, + "acc,exam_id__2017-22": 0.5125, + "acc,exam_id__2016-19": 0.48717948717948717, + "acc,exam_id__2010-01": 0.4117647058823529, + "acc,exam_id__2016-20a": 0.4375, + "acc,exam_id__2014-14": 0.4875, + "acc,exam_id__2014-13": 0.375, + "acc,exam_id__2016-20": 0.4125, + "acc,exam_id__2015-17": 0.5384615384615384, + 
"acc,exam_id__2011-05": 0.475, + "acc,exam_id__2013-11": 0.4625, + "acc,exam_id__2012-06": 0.475, + "acc,exam_id__2013-12": 0.3875, + "acc,exam_id__2011-04": 0.4125, + "acc,exam_id__2013-10": 0.425, + "acc,exam_id__2017-24": 0.3625, + "acc,exam_id__2016-21": 0.3625, + "acc,exam_id__2017-23": 0.4625, + "acc,exam_id__2015-18": 0.4125, + "acc,exam_id__2018-25": 0.425, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6725854591241898, + "acc,all": 0.6897767332549941 + }, + "tweetsentbr": { + "f1_macro,all": 0.6237374294123997, + "acc,all": 0.6965174129353234, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "2f73ffc6b55b7bc3e0a1cc6fa613ee948c9484c4", - 
"model_dtype": "torch.float16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 4096, - "max_ctx_length": 4064, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1620.039188243527, - "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "2f73ffc6b55b7bc3e0a1cc6fa613ee948c9484c4", + "model_dtype": "torch.float16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + 
"max_length": 4096, + "max_ctx_length": 4064, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + 
"fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=rishiraj/CatPPT-base,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=rishiraj/CatPPT-base,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "804df15" + "git_hash": "804df15" } \ No newline at end of file diff --git a/rishiraj/CatPPT-base/results_2024-02-26T18-41-56.705669.json b/rishiraj/CatPPT-base/results_2024-02-26T18-41-56.705669.json index a00d30f1ce33acdaee634234384fb661313fd017..2d101e0d49041f918721107c19a2c9d4721637cc 100644 --- a/rishiraj/CatPPT-base/results_2024-02-26T18-41-56.705669.json +++ b/rishiraj/CatPPT-base/results_2024-02-26T18-41-56.705669.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6791700571775308, - "all_grouped_npm": 0.5261877414935174, + "all_grouped_average": 0.6964960968834307, + "all_grouped_npm": 0.5519705386749162, "all_grouped": { "enem_challenge": 0.6536039188243526, "bluex": 0.5591098748261474, @@ -45,7 +45,7 @@ "faquad_nli": 0.769428955491387, "hatebr_offensive": 0.8583629779928524, "portuguese_hate_speech": 0.6725854591241898, - "tweetsentbr": 0.46780307205929983 + "tweetsentbr": 0.6237374294123997 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6536039188243526, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.769428955491387, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8583629779928524, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6725854591241898, - "harness|tweetsentbr|tweetsentbr|None|25": 0.46780307205929983 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6237374294123997 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6536039188243526, @@ -150,9 +150,9 @@ "main_score": 0.6725854591241898 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.46780307205929983, + "f1_macro,all": 0.6237374294123997, "acc,all": 0.6965174129353234, - "main_score": 0.46780307205929983 + 
"main_score": 0.6237374294123997 } }, "config_tasks": { diff --git a/rishiraj/CatPPT/raw_2024-02-26T19-43-35.008850/results.json b/rishiraj/CatPPT/raw_2024-02-26T19-43-35.008850/results.json index bf8e1b4092b49ee2db2bac465c917ba966646df2..ee54082320318fee663b7064f1f411533e140537 100644 --- a/rishiraj/CatPPT/raw_2024-02-26T19-43-35.008850/results.json +++ b/rishiraj/CatPPT/raw_2024-02-26T19-43-35.008850/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9248100520349588, - "acc,all": 0.9248366013071896, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7800518971455536, - "mse,all": 0.4701062091503267, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5632823365785814, - "acc,exam_id__USP_2024": 0.7804878048780488, - "acc,exam_id__USP_2020": 0.5892857142857143, - "acc,exam_id__USP_2018": 0.48148148148148145, - "acc,exam_id__UNICAMP_2018": 0.5740740740740741, - "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, - "acc,exam_id__USP_2023": 0.6590909090909091, - "acc,exam_id__UNICAMP_2022": 0.6923076923076923, - "acc,exam_id__UNICAMP_2019": 0.56, - "acc,exam_id__UNICAMP_2024": 0.5555555555555556, - "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, - "acc,exam_id__UNICAMP_2020": 0.5454545454545454, - "acc,exam_id__USP_2022": 0.46938775510204084, - "acc,exam_id__USP_2021": 0.5, - "acc,exam_id__UNICAMP_2023": 0.5348837209302325, - "acc,exam_id__USP_2019": 0.425, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6543037088873338, - "acc,exam_id__2011": 0.717948717948718, - "acc,exam_id__2016": 0.6115702479338843, - "acc,exam_id__2012": 0.6551724137931034, - "acc,exam_id__2015": 0.6302521008403361, - "acc,exam_id__2010": 0.6410256410256411, - "acc,exam_id__2023": 0.7111111111111111, - "acc,exam_id__2017": 0.646551724137931, - "acc,exam_id__2013": 0.6388888888888888, - "acc,exam_id__2014": 0.6422018348623854, - "acc,exam_id__2009": 0.6260869565217392, - "acc,exam_id__2016_2": 0.6666666666666666, - "acc,exam_id__2022": 0.6541353383458647 - }, - "faquad_nli": { - "f1_macro,all": 0.7700800415702798, - "acc,all": 0.8261538461538461, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8553480360180306, - "acc,all": 0.8564285714285714 - }, - "oab_exams": { - "acc,all": 0.428246013667426, - "acc,exam_id__2012-07": 0.375, - "acc,exam_id__2012-09": 0.3246753246753247, - "acc,exam_id__2015-16": 0.3375, - "acc,exam_id__2011-03": 0.3838383838383838, - "acc,exam_id__2010-02": 0.48, - "acc,exam_id__2012-08": 0.375, - "acc,exam_id__2012-06a": 0.4625, - "acc,exam_id__2014-15": 0.5, - "acc,exam_id__2017-22": 0.5125, - "acc,exam_id__2016-19": 0.47435897435897434, - "acc,exam_id__2010-01": 0.4117647058823529, - "acc,exam_id__2016-20a": 0.4375, - "acc,exam_id__2014-14": 0.5, - "acc,exam_id__2014-13": 0.3875, - "acc,exam_id__2016-20": 0.4125, - "acc,exam_id__2015-17": 0.5256410256410257, - "acc,exam_id__2011-05": 0.475, - "acc,exam_id__2013-11": 0.475, - "acc,exam_id__2012-06": 0.4375, - "acc,exam_id__2013-12": 0.3875, - "acc,exam_id__2011-04": 0.4125, - "acc,exam_id__2013-10": 0.425, - "acc,exam_id__2017-24": 0.3625, - "acc,exam_id__2016-21": 0.3625, - "acc,exam_id__2017-23": 0.4625, - "acc,exam_id__2015-18": 0.425, - "acc,exam_id__2018-25": 0.4375, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6792047113008148, - "acc,all": 0.6980023501762632 - }, - "tweetsentbr": { - "f1_macro,all": 
0.46985479631500854, - "acc,all": 0.6985074626865672, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9248100520349588, + "acc,all": 0.9248366013071896, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7800518971455536, + "mse,all": 0.4701062091503267, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5632823365785814, + "acc,exam_id__USP_2024": 0.7804878048780488, + "acc,exam_id__USP_2020": 0.5892857142857143, + "acc,exam_id__USP_2018": 0.48148148148148145, + "acc,exam_id__UNICAMP_2018": 0.5740740740740741, + "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, + "acc,exam_id__USP_2023": 0.6590909090909091, + "acc,exam_id__UNICAMP_2022": 0.6923076923076923, + "acc,exam_id__UNICAMP_2019": 0.56, + "acc,exam_id__UNICAMP_2024": 0.5555555555555556, + "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, + "acc,exam_id__UNICAMP_2020": 0.5454545454545454, + "acc,exam_id__USP_2022": 0.46938775510204084, + "acc,exam_id__USP_2021": 0.5, + "acc,exam_id__UNICAMP_2023": 0.5348837209302325, + "acc,exam_id__USP_2019": 0.425, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6543037088873338, + "acc,exam_id__2011": 0.717948717948718, + "acc,exam_id__2016": 0.6115702479338843, + "acc,exam_id__2012": 0.6551724137931034, + "acc,exam_id__2015": 0.6302521008403361, + "acc,exam_id__2010": 0.6410256410256411, + "acc,exam_id__2023": 0.7111111111111111, + "acc,exam_id__2017": 0.646551724137931, + "acc,exam_id__2013": 0.6388888888888888, + "acc,exam_id__2014": 0.6422018348623854, + "acc,exam_id__2009": 0.6260869565217392, + "acc,exam_id__2016_2": 0.6666666666666666, + "acc,exam_id__2022": 0.6541353383458647 + }, + "faquad_nli": { + "f1_macro,all": 0.7700800415702798, + "acc,all": 0.8261538461538461, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8553480360180306, + "acc,all": 0.8564285714285714 + }, + "oab_exams": { + "acc,all": 0.428246013667426, + "acc,exam_id__2012-07": 0.375, + "acc,exam_id__2012-09": 0.3246753246753247, + "acc,exam_id__2015-16": 0.3375, + "acc,exam_id__2011-03": 0.3838383838383838, + "acc,exam_id__2010-02": 0.48, + "acc,exam_id__2012-08": 0.375, + 
"acc,exam_id__2012-06a": 0.4625, + "acc,exam_id__2014-15": 0.5, + "acc,exam_id__2017-22": 0.5125, + "acc,exam_id__2016-19": 0.47435897435897434, + "acc,exam_id__2010-01": 0.4117647058823529, + "acc,exam_id__2016-20a": 0.4375, + "acc,exam_id__2014-14": 0.5, + "acc,exam_id__2014-13": 0.3875, + "acc,exam_id__2016-20": 0.4125, + "acc,exam_id__2015-17": 0.5256410256410257, + "acc,exam_id__2011-05": 0.475, + "acc,exam_id__2013-11": 0.475, + "acc,exam_id__2012-06": 0.4375, + "acc,exam_id__2013-12": 0.3875, + "acc,exam_id__2011-04": 0.4125, + "acc,exam_id__2013-10": 0.425, + "acc,exam_id__2017-24": 0.3625, + "acc,exam_id__2016-21": 0.3625, + "acc,exam_id__2017-23": 0.4625, + "acc,exam_id__2015-18": 0.425, + "acc,exam_id__2018-25": 0.4375, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6792047113008148, + "acc,all": 0.6980023501762632 + }, + "tweetsentbr": { + "f1_macro,all": 0.6264730617533447, + "acc,all": 0.6985074626865672, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": 
"find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - 
"regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. 
Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "7cb7df3f5ba954a917e19aa2c78dde45b68610f2", - 
"model_dtype": "torch.bfloat16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 4096, - "max_ctx_length": 4064, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1620.039188243527, - "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "7cb7df3f5ba954a917e19aa2c78dde45b68610f2", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + 
"max_length": 4096, + "max_ctx_length": 4064, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + 
"fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=rishiraj/CatPPT,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=rishiraj/CatPPT,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "804df15" + "git_hash": "804df15" } \ No newline at end of file diff --git a/rishiraj/CatPPT/results_2024-02-26T19-43-35.008850.json b/rishiraj/CatPPT/results_2024-02-26T19-43-35.008850.json index dc367b9cfa638e481787bff0a706774522c4d7c6..7357e5ddea5ae9e2547d423d6e11eadf9653f796 100644 --- a/rishiraj/CatPPT/results_2024-02-26T19-43-35.008850.json +++ b/rishiraj/CatPPT/results_2024-02-26T19-43-35.008850.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6805757326131097, - "all_grouped_npm": 0.5283465491333306, + "all_grouped_average": 0.6979777621062582, + "all_grouped_npm": 0.5542424263552778, "all_grouped": { "enem_challenge": 0.6543037088873338, "bluex": 0.5632823365785814, @@ -45,7 +45,7 @@ "faquad_nli": 0.7700800415702798, "hatebr_offensive": 0.8553480360180306, "portuguese_hate_speech": 0.6792047113008148, - "tweetsentbr": 0.46985479631500854 + "tweetsentbr": 0.6264730617533447 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6543037088873338, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7700800415702798, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8553480360180306, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6792047113008148, - "harness|tweetsentbr|tweetsentbr|None|25": 0.46985479631500854 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6264730617533447 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6543037088873338, @@ -150,9 +150,9 @@ "main_score": 0.6792047113008148 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.46985479631500854, + "f1_macro,all": 0.6264730617533447, "acc,all": 0.6985074626865672, - "main_score": 0.46985479631500854 + "main_score": 
0.6264730617533447 } }, "config_tasks": { diff --git a/rombodawg/Everyone-Coder-4x7b-Base/raw_2024-07-28T02-42-40.107595/results.json b/rombodawg/Everyone-Coder-4x7b-Base/raw_2024-07-28T02-42-40.107595/results.json index c73beb6bbb9e881f36b69e2c59968d409f0674f9..8cdca69c9756cffcd34b34ad8d31ff4fbd2fd471 100644 --- a/rombodawg/Everyone-Coder-4x7b-Base/raw_2024-07-28T02-42-40.107595/results.json +++ b/rombodawg/Everyone-Coder-4x7b-Base/raw_2024-07-28T02-42-40.107595/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.8949068924338127, - "acc,all": 0.8950163398692811, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.6764417479682397, - "mse,all": 0.7728308823529411, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5159944367176634, - "acc,exam_id__USP_2019": 0.425, - "acc,exam_id__USP_2018": 0.46296296296296297, - "acc,exam_id__USP_2020": 0.48214285714285715, - "acc,exam_id__USP_2022": 0.46938775510204084, - "acc,exam_id__USP_2023": 0.5681818181818182, - "acc,exam_id__UNICAMP_2019": 0.58, - "acc,exam_id__UNICAMP_2020": 0.5272727272727272, - "acc,exam_id__UNICAMP_2023": 0.5581395348837209, - "acc,exam_id__UNICAMP_2021_1": 0.5869565217391305, - "acc,exam_id__UNICAMP_2022": 0.6153846153846154, - "acc,exam_id__UNICAMP_2024": 0.6, - "acc,exam_id__UNICAMP_2018": 0.3888888888888889, - "acc,exam_id__USP_2024": 0.6341463414634146, - "acc,exam_id__USP_2021": 0.4807692307692308, - "acc,exam_id__UNICAMP_2021_2": 0.43137254901960786, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6410076976906928, - "acc,exam_id__2016": 0.6115702479338843, - "acc,exam_id__2013": 0.7037037037037037, - "acc,exam_id__2012": 0.646551724137931, - "acc,exam_id__2023": 0.6370370370370371, - "acc,exam_id__2016_2": 0.6422764227642277, - "acc,exam_id__2010": 0.6239316239316239, - "acc,exam_id__2009": 0.591304347826087, - "acc,exam_id__2011": 0.7264957264957265, - "acc,exam_id__2015": 0.6134453781512605, - "acc,exam_id__2014": 0.6330275229357798, - "acc,exam_id__2017": 0.5948275862068966, - "acc,exam_id__2022": 0.6691729323308271 - }, - "faquad_nli": { - "f1_macro,all": 0.7038566806008666, - "acc,all": 0.7892307692307692, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8454215242053973, - "acc,all": 0.8464285714285714 - }, - "oab_exams": { - "acc,all": 0.44646924829157175, - "acc,exam_id__2017-24": 0.3875, - "acc,exam_id__2011-05": 0.4625, - "acc,exam_id__2015-16": 0.425, - "acc,exam_id__2016-20a": 0.3875, - "acc,exam_id__2011-03": 0.3939393939393939, - "acc,exam_id__2012-09": 0.35064935064935066, - "acc,exam_id__2012-06": 0.525, - "acc,exam_id__2012-06a": 0.525, - "acc,exam_id__2013-11": 0.475, - "acc,exam_id__2013-10": 0.425, - "acc,exam_id__2018-25": 0.4125, - "acc,exam_id__2014-13": 0.35, - "acc,exam_id__2014-14": 0.5125, - "acc,exam_id__2012-07": 0.375, - "acc,exam_id__2017-23": 0.425, - "acc,exam_id__2016-20": 0.425, - "acc,exam_id__2016-21": 0.425, - "acc,exam_id__2011-04": 0.4125, - "acc,exam_id__2015-18": 0.4625, - "acc,exam_id__2016-19": 0.5, - "acc,exam_id__2014-15": 0.5512820512820513, - "acc,exam_id__2017-22": 0.475, - "acc,exam_id__2012-08": 0.4375, - "acc,exam_id__2015-17": 0.5641025641025641, - "acc,exam_id__2010-01": 0.4, - "acc,exam_id__2013-12": 0.55, - "acc,exam_id__2010-02": 0.44, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.5980933434196943, - "acc,all": 0.6016451233842538 - }, - 
"tweetsentbr": { - "f1_macro,all": 0.4486777636649074, - "acc,all": 0.6880597014925374, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.8949068924338127, + "acc,all": 0.8950163398692811, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.6764417479682397, + "mse,all": 0.7728308823529411, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5159944367176634, + "acc,exam_id__USP_2019": 0.425, + "acc,exam_id__USP_2018": 0.46296296296296297, + "acc,exam_id__USP_2020": 0.48214285714285715, + "acc,exam_id__USP_2022": 0.46938775510204084, + "acc,exam_id__USP_2023": 0.5681818181818182, + "acc,exam_id__UNICAMP_2019": 0.58, + "acc,exam_id__UNICAMP_2020": 0.5272727272727272, + "acc,exam_id__UNICAMP_2023": 0.5581395348837209, + "acc,exam_id__UNICAMP_2021_1": 0.5869565217391305, + "acc,exam_id__UNICAMP_2022": 0.6153846153846154, + "acc,exam_id__UNICAMP_2024": 0.6, + "acc,exam_id__UNICAMP_2018": 0.3888888888888889, + "acc,exam_id__USP_2024": 0.6341463414634146, + "acc,exam_id__USP_2021": 0.4807692307692308, + "acc,exam_id__UNICAMP_2021_2": 0.43137254901960786, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6410076976906928, + "acc,exam_id__2016": 0.6115702479338843, + "acc,exam_id__2013": 0.7037037037037037, + "acc,exam_id__2012": 0.646551724137931, + "acc,exam_id__2023": 0.6370370370370371, + "acc,exam_id__2016_2": 0.6422764227642277, + "acc,exam_id__2010": 0.6239316239316239, + "acc,exam_id__2009": 0.591304347826087, + "acc,exam_id__2011": 0.7264957264957265, + "acc,exam_id__2015": 0.6134453781512605, + "acc,exam_id__2014": 0.6330275229357798, + "acc,exam_id__2017": 0.5948275862068966, + "acc,exam_id__2022": 0.6691729323308271 + }, + "faquad_nli": { + "f1_macro,all": 0.7038566806008666, + "acc,all": 0.7892307692307692, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8454215242053973, + "acc,all": 0.8464285714285714 + }, + "oab_exams": { + "acc,all": 0.44646924829157175, + "acc,exam_id__2017-24": 0.3875, + "acc,exam_id__2011-05": 0.4625, + "acc,exam_id__2015-16": 0.425, + "acc,exam_id__2016-20a": 0.3875, + "acc,exam_id__2011-03": 0.3939393939393939, + 
"acc,exam_id__2012-09": 0.35064935064935066, + "acc,exam_id__2012-06": 0.525, + "acc,exam_id__2012-06a": 0.525, + "acc,exam_id__2013-11": 0.475, + "acc,exam_id__2013-10": 0.425, + "acc,exam_id__2018-25": 0.4125, + "acc,exam_id__2014-13": 0.35, + "acc,exam_id__2014-14": 0.5125, + "acc,exam_id__2012-07": 0.375, + "acc,exam_id__2017-23": 0.425, + "acc,exam_id__2016-20": 0.425, + "acc,exam_id__2016-21": 0.425, + "acc,exam_id__2011-04": 0.4125, + "acc,exam_id__2015-18": 0.4625, + "acc,exam_id__2016-19": 0.5, + "acc,exam_id__2014-15": 0.5512820512820513, + "acc,exam_id__2017-22": 0.475, + "acc,exam_id__2012-08": 0.4375, + "acc,exam_id__2015-17": 0.5641025641025641, + "acc,exam_id__2010-01": 0.4, + "acc,exam_id__2013-12": 0.55, + "acc,exam_id__2010-02": 0.44, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.5980933434196943, + "acc,all": 0.6016451233842538 + }, + "tweetsentbr": { + "f1_macro,all": 0.5982370182198765, + "acc,all": 0.6880597014925374, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "5e4757ee9875ee65df9216dcc61208bd504d4632", - "model_dtype": "torch.float16", - "model_memory_footprint": 48844259328, - "model_num_parameters": 24153690112, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 1620.039188243527, 
- "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "5e4757ee9875ee65df9216dcc61208bd504d4632", + "model_dtype": "torch.float16", + "model_memory_footprint": 48844259328, + "model_num_parameters": 24153690112, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 16, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=rombodawg/Everyone-Coder-4x7b-Base,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=rombodawg/Everyone-Coder-4x7b-Base,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - 
"git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/rombodawg/Everyone-Coder-4x7b-Base/results_2024-07-28T02-42-40.107595.json b/rombodawg/Everyone-Coder-4x7b-Base/results_2024-07-28T02-42-40.107595.json index 7c33881bd7afc728b99c2ced89f108e66a6f99f2..86aaf3c222df65c39fc749efd9d8533ae206f1ca 100644 --- a/rombodawg/Everyone-Coder-4x7b-Base/results_2024-07-28T02-42-40.107595.json +++ b/rombodawg/Everyone-Coder-4x7b-Base/results_2024-07-28T02-42-40.107595.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.641207703888094, - "all_grouped_npm": 0.46773098545121783, + "all_grouped_average": 0.6578253988386461, + "all_grouped_npm": 0.4924596981752537, "all_grouped": { "enem_challenge": 0.6410076976906928, "bluex": 0.5159944367176634, @@ -45,7 +45,7 @@ "faquad_nli": 0.7038566806008666, "hatebr_offensive": 0.8454215242053973, "portuguese_hate_speech": 0.5980933434196943, - "tweetsentbr": 0.4486777636649074 + "tweetsentbr": 0.5982370182198765 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6410076976906928, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7038566806008666, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8454215242053973, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.5980933434196943, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4486777636649074 + "harness|tweetsentbr|tweetsentbr|None|25": 0.5982370182198765 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6410076976906928, @@ -150,9 +150,9 @@ "main_score": 0.5980933434196943 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4486777636649074, + "f1_macro,all": 0.5982370182198765, "acc,all": 0.6880597014925374, - "main_score": 0.4486777636649074 + "main_score": 0.5982370182198765 } }, "config_tasks": { diff --git a/royallab/ZephRP-m7b/raw_2024-05-19T16-08-35.867510/results.json b/royallab/ZephRP-m7b/raw_2024-05-19T16-08-35.867510/results.json index 3ec2399262944e3c22f9761b26e7188195e3b241..992813bdbb94a323a8d352bd3de276fca6677c75 100644 --- a/royallab/ZephRP-m7b/raw_2024-05-19T16-08-35.867510/results.json +++ b/royallab/ZephRP-m7b/raw_2024-05-19T16-08-35.867510/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.8915411564964209, - "acc,all": 0.891748366013072, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.72912193009527, - "mse,all": 0.6166258169934641, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.47844228094575797, - "acc,exam_id__USP_2018": 0.5185185185185185, - "acc,exam_id__USP_2020": 0.4107142857142857, - "acc,exam_id__USP_2022": 0.5714285714285714, - "acc,exam_id__UNICAMP_2021_2": 0.37254901960784315, - "acc,exam_id__USP_2019": 0.35, - "acc,exam_id__UNICAMP_2020": 0.5636363636363636, - "acc,exam_id__UNICAMP_2024": 0.4888888888888889, - "acc,exam_id__UNICAMP_2018": 0.37037037037037035, - "acc,exam_id__UNICAMP_2022": 0.5384615384615384, - "acc,exam_id__UNICAMP_2023": 0.4186046511627907, - "acc,exam_id__USP_2021": 0.4230769230769231, - "acc,exam_id__UNICAMP_2019": 0.44, - "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, - "acc,exam_id__USP_2024": 0.6097560975609756, - "acc,exam_id__USP_2023": 0.6136363636363636, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.5843247025892232, - "acc,exam_id__2011": 0.6923076923076923, - "acc,exam_id__2013": 0.5925925925925926, - "acc,exam_id__2023": 0.5777777777777777, - "acc,exam_id__2010": 0.5555555555555556, - 
"acc,exam_id__2009": 0.6173913043478261, - "acc,exam_id__2016_2": 0.5365853658536586, - "acc,exam_id__2015": 0.5546218487394958, - "acc,exam_id__2016": 0.5289256198347108, - "acc,exam_id__2017": 0.5862068965517241, - "acc,exam_id__2012": 0.6379310344827587, - "acc,exam_id__2022": 0.5639097744360902, - "acc,exam_id__2014": 0.5779816513761468 - }, - "faquad_nli": { - "f1_macro,all": 0.6992338771035772, - "acc,all": 0.7707692307692308, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8014948945480438, - "acc,all": 0.8042857142857143 - }, - "oab_exams": { - "acc,all": 0.41776765375854213, - "acc,exam_id__2017-22": 0.45, - "acc,exam_id__2017-23": 0.3875, - "acc,exam_id__2010-02": 0.45, - "acc,exam_id__2013-10": 0.4125, - "acc,exam_id__2014-13": 0.3875, - "acc,exam_id__2011-03": 0.3434343434343434, - "acc,exam_id__2017-24": 0.4, - "acc,exam_id__2012-08": 0.4, - "acc,exam_id__2010-01": 0.35294117647058826, - "acc,exam_id__2012-09": 0.4025974025974026, - "acc,exam_id__2012-07": 0.35, - "acc,exam_id__2016-19": 0.48717948717948717, - "acc,exam_id__2015-18": 0.5, - "acc,exam_id__2016-20": 0.4375, - "acc,exam_id__2011-05": 0.4875, - "acc,exam_id__2014-15": 0.5384615384615384, - "acc,exam_id__2011-04": 0.3375, - "acc,exam_id__2016-21": 0.45, - "acc,exam_id__2015-17": 0.4358974358974359, - "acc,exam_id__2013-11": 0.4625, - "acc,exam_id__2018-25": 0.425, - "acc,exam_id__2012-06a": 0.3875, - "acc,exam_id__2012-06": 0.4375, - "acc,exam_id__2014-14": 0.45, - "acc,exam_id__2015-16": 0.3375, - "acc,exam_id__2016-20a": 0.3, - "acc,exam_id__2013-12": 0.4875, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6240808366605732, - "acc,all": 0.6345475910693302 - }, - "tweetsentbr": { - "f1_macro,all": 0.4669073958966893, - "acc,all": 0.6895522388059702, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.8915411564964209, + "acc,all": 0.891748366013072, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.72912193009527, + "mse,all": 0.6166258169934641, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.47844228094575797, + "acc,exam_id__USP_2018": 0.5185185185185185, + "acc,exam_id__USP_2020": 0.4107142857142857, + "acc,exam_id__USP_2022": 0.5714285714285714, + "acc,exam_id__UNICAMP_2021_2": 0.37254901960784315, + "acc,exam_id__USP_2019": 0.35, + "acc,exam_id__UNICAMP_2020": 0.5636363636363636, + "acc,exam_id__UNICAMP_2024": 0.4888888888888889, + "acc,exam_id__UNICAMP_2018": 0.37037037037037035, + "acc,exam_id__UNICAMP_2022": 0.5384615384615384, + "acc,exam_id__UNICAMP_2023": 0.4186046511627907, + "acc,exam_id__USP_2021": 0.4230769230769231, + "acc,exam_id__UNICAMP_2019": 0.44, + "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826, + "acc,exam_id__USP_2024": 0.6097560975609756, + "acc,exam_id__USP_2023": 0.6136363636363636, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.5843247025892232, + "acc,exam_id__2011": 0.6923076923076923, + "acc,exam_id__2013": 0.5925925925925926, + "acc,exam_id__2023": 0.5777777777777777, + "acc,exam_id__2010": 0.5555555555555556, + "acc,exam_id__2009": 0.6173913043478261, + "acc,exam_id__2016_2": 0.5365853658536586, + "acc,exam_id__2015": 0.5546218487394958, + "acc,exam_id__2016": 0.5289256198347108, + "acc,exam_id__2017": 0.5862068965517241, + "acc,exam_id__2012": 0.6379310344827587, + "acc,exam_id__2022": 0.5639097744360902, + "acc,exam_id__2014": 0.5779816513761468 + }, + "faquad_nli": { + "f1_macro,all": 0.6992338771035772, + "acc,all": 0.7707692307692308, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8014948945480438, + "acc,all": 0.8042857142857143 + }, + "oab_exams": { + "acc,all": 0.41776765375854213, + "acc,exam_id__2017-22": 0.45, + "acc,exam_id__2017-23": 0.3875, + "acc,exam_id__2010-02": 0.45, + "acc,exam_id__2013-10": 0.4125, + "acc,exam_id__2014-13": 0.3875, + "acc,exam_id__2011-03": 0.3434343434343434, + "acc,exam_id__2017-24": 0.4, + "acc,exam_id__2012-08": 0.4, + "acc,exam_id__2010-01": 0.35294117647058826, + "acc,exam_id__2012-09": 0.4025974025974026, + "acc,exam_id__2012-07": 0.35, + "acc,exam_id__2016-19": 0.48717948717948717, + "acc,exam_id__2015-18": 0.5, + "acc,exam_id__2016-20": 0.4375, + "acc,exam_id__2011-05": 0.4875, + "acc,exam_id__2014-15": 0.5384615384615384, + "acc,exam_id__2011-04": 0.3375, + "acc,exam_id__2016-21": 0.45, + "acc,exam_id__2015-17": 0.4358974358974359, + "acc,exam_id__2013-11": 0.4625, + "acc,exam_id__2018-25": 
0.425, + "acc,exam_id__2012-06a": 0.3875, + "acc,exam_id__2012-06": 0.4375, + "acc,exam_id__2014-14": 0.45, + "acc,exam_id__2015-16": 0.3375, + "acc,exam_id__2016-20a": 0.3, + "acc,exam_id__2013-12": 0.4875, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6240808366605732, + "acc,all": 0.6345475910693302 + }, + "tweetsentbr": { + "f1_macro,all": 0.6225431945289192, + "acc,all": 0.6895522388059702, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "4ef26ff937765b3fb279151ba9af48fb42c03932", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 1620.039188243527, 
- "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "4ef26ff937765b3fb279151ba9af48fb42c03932", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=royallab/ZephRP-m7b,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=royallab/ZephRP-m7b,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": 
"51e0e5e" } \ No newline at end of file diff --git a/royallab/ZephRP-m7b/results_2024-05-19T16-08-35.867510.json b/royallab/ZephRP-m7b/results_2024-05-19T16-08-35.867510.json index 5b0c7ca62da7e6b3970e82c515b56074f9687f0e..0e63893bf055f131687ac277805202719f796889 100644 --- a/royallab/ZephRP-m7b/results_2024-05-19T16-08-35.867510.json +++ b/royallab/ZephRP-m7b/results_2024-05-19T16-08-35.867510.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6325460808993442, - "all_grouped_npm": 0.4531785602945303, + "all_grouped_average": 0.6498389474140365, + "all_grouped_npm": 0.47891199260806033, "all_grouped": { "enem_challenge": 0.5843247025892232, "bluex": 0.47844228094575797, @@ -45,7 +45,7 @@ "faquad_nli": 0.6992338771035772, "hatebr_offensive": 0.8014948945480438, "portuguese_hate_speech": 0.6240808366605732, - "tweetsentbr": 0.4669073958966893 + "tweetsentbr": 0.6225431945289192 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.5843247025892232, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.6992338771035772, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8014948945480438, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6240808366605732, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4669073958966893 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6225431945289192 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.5843247025892232, @@ -150,9 +150,9 @@ "main_score": 0.6240808366605732 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4669073958966893, + "f1_macro,all": 0.6225431945289192, "acc,all": 0.6895522388059702, - "main_score": 0.4669073958966893 + "main_score": 0.6225431945289192 } }, "config_tasks": { diff --git a/saltlux/luxia-21.4b-alignment-v1.0/raw_2024-05-25T18-49-23.355684/results.json b/saltlux/luxia-21.4b-alignment-v1.0/raw_2024-05-25T18-49-23.355684/results.json index b7e5b8785b1a61d2b40692ac1f668063c9944b5b..0ea01be56c5280f73982c407d298e69ef4f93dc9 100644 --- a/saltlux/luxia-21.4b-alignment-v1.0/raw_2024-05-25T18-49-23.355684/results.json +++ b/saltlux/luxia-21.4b-alignment-v1.0/raw_2024-05-25T18-49-23.355684/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9060276242466888, - "acc,all": 0.9060457516339869, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.8119072027277104, - "mse,all": 0.44578839869281045, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5716272600834492, - "acc,exam_id__UNICAMP_2018": 0.46296296296296297, - "acc,exam_id__UNICAMP_2023": 0.5813953488372093, - "acc,exam_id__USP_2023": 0.7272727272727273, - "acc,exam_id__UNICAMP_2024": 0.5777777777777777, - "acc,exam_id__USP_2024": 0.7073170731707317, - "acc,exam_id__UNICAMP_2021_1": 0.6304347826086957, - "acc,exam_id__USP_2020": 0.5535714285714286, - "acc,exam_id__UNICAMP_2020": 0.5454545454545454, - "acc,exam_id__UNICAMP_2022": 0.6666666666666666, - "acc,exam_id__UNICAMP_2019": 0.6, - "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, - "acc,exam_id__USP_2018": 0.4444444444444444, - "acc,exam_id__USP_2021": 0.46153846153846156, - "acc,exam_id__USP_2019": 0.525, - "acc,exam_id__USP_2022": 0.6122448979591837, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6745976207137858, - "acc,exam_id__2011": 0.7948717948717948, - "acc,exam_id__2017": 0.6724137931034483, - "acc,exam_id__2015": 0.6638655462184874, - "acc,exam_id__2016": 0.6115702479338843, - "acc,exam_id__2016_2": 0.6666666666666666, - 
"acc,exam_id__2009": 0.6521739130434783, - "acc,exam_id__2012": 0.6206896551724138, - "acc,exam_id__2010": 0.7094017094017094, - "acc,exam_id__2013": 0.6203703703703703, - "acc,exam_id__2014": 0.7064220183486238, - "acc,exam_id__2022": 0.6691729323308271, - "acc,exam_id__2023": 0.7037037037037037 - }, - "faquad_nli": { - "f1_macro,all": 0.6880394345731324, - "acc,all": 0.7215384615384616, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7294344250967385, - "acc,all": 0.7442857142857143 - }, - "oab_exams": { - "acc,all": 0.4378132118451025, - "acc,exam_id__2011-03": 0.41414141414141414, - "acc,exam_id__2014-13": 0.45, - "acc,exam_id__2013-10": 0.4, - "acc,exam_id__2017-24": 0.425, - "acc,exam_id__2017-22": 0.425, - "acc,exam_id__2012-06a": 0.5625, - "acc,exam_id__2016-20a": 0.4375, - "acc,exam_id__2012-09": 0.36363636363636365, - "acc,exam_id__2015-16": 0.5, - "acc,exam_id__2011-04": 0.3875, - "acc,exam_id__2012-07": 0.425, - "acc,exam_id__2014-14": 0.475, - "acc,exam_id__2014-15": 0.48717948717948717, - "acc,exam_id__2010-02": 0.46, - "acc,exam_id__2015-18": 0.5125, - "acc,exam_id__2016-19": 0.44871794871794873, - "acc,exam_id__2012-06": 0.5125, - "acc,exam_id__2013-12": 0.475, - "acc,exam_id__2011-05": 0.35, - "acc,exam_id__2017-23": 0.375, - "acc,exam_id__2013-11": 0.425, - "acc,exam_id__2016-20": 0.425, - "acc,exam_id__2016-21": 0.3625, - "acc,exam_id__2018-25": 0.425, - "acc,exam_id__2010-01": 0.38823529411764707, - "acc,exam_id__2012-08": 0.3625, - "acc,exam_id__2015-17": 0.5512820512820513, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7396765935171477, - "acc,all": 0.7802585193889542 - }, - "tweetsentbr": { - "f1_macro,all": 0.4947628134435371, - "acc,all": 0.6915422885572139, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9060276242466888, + "acc,all": 0.9060457516339869, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.8119072027277104, + "mse,all": 0.44578839869281045, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5716272600834492, + "acc,exam_id__UNICAMP_2018": 0.46296296296296297, + "acc,exam_id__UNICAMP_2023": 0.5813953488372093, + "acc,exam_id__USP_2023": 0.7272727272727273, + "acc,exam_id__UNICAMP_2024": 0.5777777777777777, + "acc,exam_id__USP_2024": 0.7073170731707317, + "acc,exam_id__UNICAMP_2021_1": 0.6304347826086957, + "acc,exam_id__USP_2020": 0.5535714285714286, + "acc,exam_id__UNICAMP_2020": 0.5454545454545454, + "acc,exam_id__UNICAMP_2022": 0.6666666666666666, + "acc,exam_id__UNICAMP_2019": 0.6, + "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, + "acc,exam_id__USP_2018": 0.4444444444444444, + "acc,exam_id__USP_2021": 0.46153846153846156, + "acc,exam_id__USP_2019": 0.525, + "acc,exam_id__USP_2022": 0.6122448979591837, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6745976207137858, + "acc,exam_id__2011": 0.7948717948717948, + "acc,exam_id__2017": 0.6724137931034483, + "acc,exam_id__2015": 0.6638655462184874, + "acc,exam_id__2016": 0.6115702479338843, + "acc,exam_id__2016_2": 0.6666666666666666, + "acc,exam_id__2009": 0.6521739130434783, + "acc,exam_id__2012": 0.6206896551724138, + "acc,exam_id__2010": 0.7094017094017094, + "acc,exam_id__2013": 0.6203703703703703, + "acc,exam_id__2014": 0.7064220183486238, + "acc,exam_id__2022": 0.6691729323308271, + "acc,exam_id__2023": 0.7037037037037037 + }, + "faquad_nli": { + "f1_macro,all": 0.6880394345731324, + "acc,all": 0.7215384615384616, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7294344250967385, + "acc,all": 0.7442857142857143 + }, + "oab_exams": { + "acc,all": 0.4378132118451025, + "acc,exam_id__2011-03": 0.41414141414141414, + "acc,exam_id__2014-13": 0.45, + "acc,exam_id__2013-10": 0.4, + "acc,exam_id__2017-24": 0.425, + "acc,exam_id__2017-22": 0.425, + "acc,exam_id__2012-06a": 0.5625, + "acc,exam_id__2016-20a": 0.4375, + "acc,exam_id__2012-09": 0.36363636363636365, + "acc,exam_id__2015-16": 0.5, + "acc,exam_id__2011-04": 0.3875, + "acc,exam_id__2012-07": 0.425, + "acc,exam_id__2014-14": 0.475, + "acc,exam_id__2014-15": 0.48717948717948717, + "acc,exam_id__2010-02": 0.46, + "acc,exam_id__2015-18": 0.5125, + "acc,exam_id__2016-19": 0.44871794871794873, + "acc,exam_id__2012-06": 0.5125, + "acc,exam_id__2013-12": 0.475, + "acc,exam_id__2011-05": 0.35, + "acc,exam_id__2017-23": 0.375, + "acc,exam_id__2013-11": 0.425, + 
"acc,exam_id__2016-20": 0.425, + "acc,exam_id__2016-21": 0.3625, + "acc,exam_id__2018-25": 0.425, + "acc,exam_id__2010-01": 0.38823529411764707, + "acc,exam_id__2012-08": 0.3625, + "acc,exam_id__2015-17": 0.5512820512820513, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7396765935171477, + "acc,all": 0.7802585193889542 + }, + "tweetsentbr": { + "f1_macro,all": 0.6596837512580495, + "acc,all": 0.6915422885572139, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "87d5673e6d9f60462f195e9414a0bf6874c89ceb", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 42842973184, - "model_num_parameters": 21421479936, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1372.9162581699347, - "min_seq_length": 1349, - "max_seq_length": 1450, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1649.9162581699347, - "min_seq_length": 1626, - "max_seq_length": 1727, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1680.4075104311544, - "min_seq_length": 1305, - "max_seq_length": 2458, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 
1637.2841147655704, - "min_seq_length": 1367, - "max_seq_length": 2562, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1589.9215384615384, - "min_seq_length": 1536, - "max_seq_length": 1728, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "87d5673e6d9f60462f195e9414a0bf6874c89ceb", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 42842973184, + "model_num_parameters": 21421479936, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1215.1128571428571, - "min_seq_length": 1190, - "max_seq_length": 1475, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1419.90569476082, - "min_seq_length": 1138, - "max_seq_length": 1933, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1372.9162581699347, + "min_seq_length": 1349, + "max_seq_length": 1450, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1649.9162581699347, + "min_seq_length": 1626, + "max_seq_length": 1727, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1680.4075104311544, + "min_seq_length": 1305, + "max_seq_length": 2458, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1637.2841147655704, + "min_seq_length": 1367, + "max_seq_length": 2562, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1589.9215384615384, + "min_seq_length": 1536, + "max_seq_length": 1728, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1215.1128571428571, + "min_seq_length": 1190, + "max_seq_length": 1475, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1419.90569476082, + "min_seq_length": 1138, + "max_seq_length": 1933, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1674.6909518213865, + "min_seq_length": 1640, + "max_seq_length": 1709, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1446.6412935323383, + "min_seq_length": 1425, + "max_seq_length": 1559, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1674.6909518213865, - "min_seq_length": 1640, - "max_seq_length": 1709, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=saltlux/luxia-21.4b-alignment-v1.0,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1446.6412935323383, - "min_seq_length": 1425, - "max_seq_length": 1559, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=saltlux/luxia-21.4b-alignment-v1.0,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - 
"gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/saltlux/luxia-21.4b-alignment-v1.0/raw_2024-05-25T20-54-11.729257/results.json b/saltlux/luxia-21.4b-alignment-v1.0/raw_2024-05-25T20-54-11.729257/results.json index 40336e11e7527a820f28d2e2b25595c76c90b7f6..86b7585e6469eac00c9a894d8dbccceb4940673e 100644 --- a/saltlux/luxia-21.4b-alignment-v1.0/raw_2024-05-25T20-54-11.729257/results.json +++ b/saltlux/luxia-21.4b-alignment-v1.0/raw_2024-05-25T20-54-11.729257/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9043836192650903, - "acc,all": 0.9044117647058824, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.8127132204493684, - "mse,all": 0.43958741830065357, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5799721835883171, - "acc,exam_id__UNICAMP_2018": 0.48148148148148145, - "acc,exam_id__UNICAMP_2023": 0.5813953488372093, - "acc,exam_id__USP_2023": 0.7272727272727273, - "acc,exam_id__UNICAMP_2024": 0.5777777777777777, - "acc,exam_id__USP_2024": 0.6829268292682927, - "acc,exam_id__UNICAMP_2021_1": 0.6304347826086957, - "acc,exam_id__USP_2020": 0.5714285714285714, - "acc,exam_id__UNICAMP_2020": 0.5636363636363636, - "acc,exam_id__UNICAMP_2022": 0.6923076923076923, - "acc,exam_id__UNICAMP_2019": 0.62, - "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, - "acc,exam_id__USP_2018": 0.4444444444444444, - "acc,exam_id__USP_2021": 0.5, - "acc,exam_id__USP_2019": 0.525, - "acc,exam_id__USP_2022": 0.6122448979591837, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6731980405878236, - "acc,exam_id__2011": 0.7948717948717948, - "acc,exam_id__2017": 0.6637931034482759, - "acc,exam_id__2015": 0.680672268907563, - "acc,exam_id__2016": 0.6033057851239669, - "acc,exam_id__2016_2": 0.6585365853658537, - "acc,exam_id__2009": 0.6521739130434783, - "acc,exam_id__2012": 0.6206896551724138, - "acc,exam_id__2010": 0.7008547008547008, - "acc,exam_id__2013": 0.6203703703703703, - "acc,exam_id__2014": 0.7155963302752294, - "acc,exam_id__2022": 0.6691729323308271, - "acc,exam_id__2023": 0.6962962962962963 - }, - "faquad_nli": { - "f1_macro,all": 0.687367864693446, - "acc,all": 0.72, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7311358109405715, - "acc,all": 0.7457142857142857 - }, - "oab_exams": { - "acc,all": 0.4355353075170843, - "acc,exam_id__2011-03": 0.42424242424242425, - "acc,exam_id__2014-13": 0.4625, - "acc,exam_id__2013-10": 0.425, - "acc,exam_id__2017-24": 0.4, - "acc,exam_id__2017-22": 0.4125, - "acc,exam_id__2012-06a": 0.55, - "acc,exam_id__2016-20a": 0.4375, - "acc,exam_id__2012-09": 0.3246753246753247, - "acc,exam_id__2015-16": 0.4875, - "acc,exam_id__2011-04": 0.4, - "acc,exam_id__2012-07": 0.425, - "acc,exam_id__2014-14": 0.4625, - "acc,exam_id__2014-15": 0.48717948717948717, - "acc,exam_id__2010-02": 0.48, - "acc,exam_id__2015-18": 0.525, - "acc,exam_id__2016-19": 0.44871794871794873, - "acc,exam_id__2012-06": 0.5, - "acc,exam_id__2013-12": 0.4625, - "acc,exam_id__2011-05": 0.35, - "acc,exam_id__2017-23": 0.375, - "acc,exam_id__2013-11": 0.425, - "acc,exam_id__2016-20": 0.4125, - "acc,exam_id__2016-21": 0.3625, - "acc,exam_id__2018-25": 0.4125, - "acc,exam_id__2010-01": 0.4, - "acc,exam_id__2012-08": 0.35, - "acc,exam_id__2015-17": 0.5512820512820513, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 
0.7314765921823685, - "acc,all": 0.7696827262044653 - }, - "tweetsentbr": { - "f1_macro,all": 0.4950263563819566, - "acc,all": 0.6920398009950248, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9043836192650903, + "acc,all": 0.9044117647058824, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.8127132204493684, + "mse,all": 0.43958741830065357, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5799721835883171, + "acc,exam_id__UNICAMP_2018": 0.48148148148148145, + "acc,exam_id__UNICAMP_2023": 0.5813953488372093, + "acc,exam_id__USP_2023": 0.7272727272727273, + "acc,exam_id__UNICAMP_2024": 0.5777777777777777, + "acc,exam_id__USP_2024": 0.6829268292682927, + "acc,exam_id__UNICAMP_2021_1": 0.6304347826086957, + "acc,exam_id__USP_2020": 0.5714285714285714, + "acc,exam_id__UNICAMP_2020": 0.5636363636363636, + "acc,exam_id__UNICAMP_2022": 0.6923076923076923, + "acc,exam_id__UNICAMP_2019": 0.62, + "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, + "acc,exam_id__USP_2018": 0.4444444444444444, + "acc,exam_id__USP_2021": 0.5, + "acc,exam_id__USP_2019": 0.525, + "acc,exam_id__USP_2022": 0.6122448979591837, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6731980405878236, + "acc,exam_id__2011": 0.7948717948717948, + "acc,exam_id__2017": 0.6637931034482759, + "acc,exam_id__2015": 0.680672268907563, + "acc,exam_id__2016": 0.6033057851239669, + "acc,exam_id__2016_2": 0.6585365853658537, + "acc,exam_id__2009": 0.6521739130434783, + "acc,exam_id__2012": 0.6206896551724138, + "acc,exam_id__2010": 0.7008547008547008, + "acc,exam_id__2013": 0.6203703703703703, + "acc,exam_id__2014": 0.7155963302752294, + "acc,exam_id__2022": 0.6691729323308271, + "acc,exam_id__2023": 0.6962962962962963 + }, + "faquad_nli": { + "f1_macro,all": 0.687367864693446, + "acc,all": 0.72, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7311358109405715, + "acc,all": 0.7457142857142857 + }, + "oab_exams": { + "acc,all": 0.4355353075170843, + "acc,exam_id__2011-03": 0.42424242424242425, + "acc,exam_id__2014-13": 0.4625, + "acc,exam_id__2013-10": 0.425, + "acc,exam_id__2017-24": 0.4, + 
"acc,exam_id__2017-22": 0.4125, + "acc,exam_id__2012-06a": 0.55, + "acc,exam_id__2016-20a": 0.4375, + "acc,exam_id__2012-09": 0.3246753246753247, + "acc,exam_id__2015-16": 0.4875, + "acc,exam_id__2011-04": 0.4, + "acc,exam_id__2012-07": 0.425, + "acc,exam_id__2014-14": 0.4625, + "acc,exam_id__2014-15": 0.48717948717948717, + "acc,exam_id__2010-02": 0.48, + "acc,exam_id__2015-18": 0.525, + "acc,exam_id__2016-19": 0.44871794871794873, + "acc,exam_id__2012-06": 0.5, + "acc,exam_id__2013-12": 0.4625, + "acc,exam_id__2011-05": 0.35, + "acc,exam_id__2017-23": 0.375, + "acc,exam_id__2013-11": 0.425, + "acc,exam_id__2016-20": 0.4125, + "acc,exam_id__2016-21": 0.3625, + "acc,exam_id__2018-25": 0.4125, + "acc,exam_id__2010-01": 0.4, + "acc,exam_id__2012-08": 0.35, + "acc,exam_id__2015-17": 0.5512820512820513, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7314765921823685, + "acc,all": 0.7696827262044653 + }, + "tweetsentbr": { + "f1_macro,all": 0.6600351418426088, + "acc,all": 0.6920398009950248, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "87d5673e6d9f60462f195e9414a0bf6874c89ceb", - "model_dtype": "torch.float16", - "model_memory_footprint": 42842973184, - "model_num_parameters": 21421479936, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1372.9162581699347, - "min_seq_length": 1349, - "max_seq_length": 1450, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1649.9162581699347, - "min_seq_length": 1626, - "max_seq_length": 1727, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1680.4075104311544, - "min_seq_length": 1305, - "max_seq_length": 2458, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 1637.2841147655704, 
- "min_seq_length": 1367, - "max_seq_length": 2562, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1589.9215384615384, - "min_seq_length": 1536, - "max_seq_length": 1728, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "87d5673e6d9f60462f195e9414a0bf6874c89ceb", + "model_dtype": "torch.float16", + "model_memory_footprint": 42842973184, + "model_num_parameters": 21421479936, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1215.1128571428571, - "min_seq_length": 1190, - "max_seq_length": 1475, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1419.90569476082, - "min_seq_length": 1138, - "max_seq_length": 1933, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1372.9162581699347, + "min_seq_length": 1349, + "max_seq_length": 1450, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1649.9162581699347, + "min_seq_length": 1626, + "max_seq_length": 1727, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1680.4075104311544, + "min_seq_length": 1305, + "max_seq_length": 2458, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1637.2841147655704, + "min_seq_length": 1367, + "max_seq_length": 2562, + "max_ctx_length": 2528, + "max_gen_toks": 32, + 
"mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1589.9215384615384, + "min_seq_length": 1536, + "max_seq_length": 1728, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1215.1128571428571, + "min_seq_length": 1190, + "max_seq_length": 1475, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1419.90569476082, + "min_seq_length": 1138, + "max_seq_length": 1933, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1674.6909518213865, + "min_seq_length": 1640, + "max_seq_length": 1709, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1446.6412935323383, + "min_seq_length": 1425, + "max_seq_length": 1559, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1674.6909518213865, - "min_seq_length": 1640, - "max_seq_length": 1709, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=saltlux/luxia-21.4b-alignment-v1.0,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1446.6412935323383, - "min_seq_length": 1425, - "max_seq_length": 1559, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=saltlux/luxia-21.4b-alignment-v1.0,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - 
"git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/saltlux/luxia-21.4b-alignment-v1.0/results_2024-05-25T18-49-23.355684.json b/saltlux/luxia-21.4b-alignment-v1.0/results_2024-05-25T18-49-23.355684.json index 5cc526496d0e8b224f46bd15897f4bf18e2e6ad4..32daa0a2038ee107b9d43f30c64e2fe297ccabd5 100644 --- a/saltlux/luxia-21.4b-alignment-v1.0/results_2024-05-25T18-49-23.355684.json +++ b/saltlux/luxia-21.4b-alignment-v1.0/results_2024-05-25T18-49-23.355684.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6726540206941436, - "all_grouped_npm": 0.505421989086423, + "all_grouped_average": 0.6909785693402006, + "all_grouped_npm": 0.5326906626668648, "all_grouped": { "enem_challenge": 0.6745976207137858, "bluex": 0.5716272600834492, @@ -45,7 +45,7 @@ "faquad_nli": 0.6880394345731324, "hatebr_offensive": 0.7294344250967385, "portuguese_hate_speech": 0.7396765935171477, - "tweetsentbr": 0.4947628134435371 + "tweetsentbr": 0.6596837512580495 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6745976207137858, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.6880394345731324, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7294344250967385, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7396765935171477, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4947628134435371 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6596837512580495 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6745976207137858, @@ -150,9 +150,9 @@ "main_score": 0.7396765935171477 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4947628134435371, + "f1_macro,all": 0.6596837512580495, "acc,all": 0.6915422885572139, - "main_score": 0.4947628134435371 + "main_score": 0.6596837512580495 } }, "config_tasks": { diff --git a/saltlux/luxia-21.4b-alignment-v1.0/results_2024-05-25T20-54-11.729257.json b/saltlux/luxia-21.4b-alignment-v1.0/results_2024-05-25T20-54-11.729257.json index dcdba938529c138765ea4f340566567b905e06be..1db53b3131f5c7a44e564a2258b8c52ad45a2f85 100644 --- a/saltlux/luxia-21.4b-alignment-v1.0/results_2024-05-25T20-54-11.729257.json +++ b/saltlux/luxia-21.4b-alignment-v1.0/results_2024-05-25T20-54-11.729257.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6723121106228918, - "all_grouped_npm": 0.5043464834411824, + "all_grouped_average": 0.6906464201185198, + "all_grouped_npm": 0.5316296820953907, "all_grouped": { "enem_challenge": 0.6731980405878236, "bluex": 0.5799721835883171, @@ -45,7 +45,7 @@ "faquad_nli": 0.687367864693446, "hatebr_offensive": 0.7311358109405715, "portuguese_hate_speech": 0.7314765921823685, - "tweetsentbr": 0.4950263563819566 + "tweetsentbr": 0.6600351418426088 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6731980405878236, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.687367864693446, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7311358109405715, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7314765921823685, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4950263563819566 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6600351418426088 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6731980405878236, @@ -150,9 +150,9 @@ "main_score": 0.7314765921823685 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4950263563819566, + "f1_macro,all": 0.6600351418426088, "acc,all": 0.6920398009950248, - "main_score": 
0.4950263563819566 + "main_score": 0.6600351418426088 } }, "config_tasks": { diff --git a/saltlux/luxia-21.4b-alignment-v1.2/raw_2024-06-12T23-35-46.058025/results.json b/saltlux/luxia-21.4b-alignment-v1.2/raw_2024-06-12T23-35-46.058025/results.json index f285dfd98b37666c716dd3ff8bdd520df2da5536..2eac532574297eeeb9c4feec8f88503dfd89f82f 100644 --- a/saltlux/luxia-21.4b-alignment-v1.2/raw_2024-06-12T23-35-46.058025/results.json +++ b/saltlux/luxia-21.4b-alignment-v1.2/raw_2024-06-12T23-35-46.058025/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9015343943160568, - "acc,all": 0.9015522875816994, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7850553241810303, - "mse,all": 0.6336784368014705, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5799721835883171, - "acc,exam_id__UNICAMP_2024": 0.6, - "acc,exam_id__USP_2018": 0.46296296296296297, - "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, - "acc,exam_id__UNICAMP_2018": 0.5, - "acc,exam_id__UNICAMP_2020": 0.5272727272727272, - "acc,exam_id__UNICAMP_2022": 0.6666666666666666, - "acc,exam_id__USP_2021": 0.5769230769230769, - "acc,exam_id__USP_2023": 0.7727272727272727, - "acc,exam_id__UNICAMP_2023": 0.5813953488372093, - "acc,exam_id__USP_2020": 0.5714285714285714, - "acc,exam_id__USP_2019": 0.55, - "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, - "acc,exam_id__USP_2024": 0.6341463414634146, - "acc,exam_id__USP_2022": 0.5918367346938775, - "acc,exam_id__UNICAMP_2019": 0.66, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6829951014695591, - "acc,exam_id__2013": 0.6388888888888888, - "acc,exam_id__2023": 0.6666666666666666, - "acc,exam_id__2015": 0.6890756302521008, - "acc,exam_id__2014": 0.7155963302752294, - "acc,exam_id__2016": 0.6363636363636364, - "acc,exam_id__2017": 0.6982758620689655, - "acc,exam_id__2010": 0.7008547008547008, - "acc,exam_id__2012": 0.7068965517241379, - "acc,exam_id__2011": 0.7264957264957265, - "acc,exam_id__2022": 0.6691729323308271, - "acc,exam_id__2016_2": 0.6991869918699187, - "acc,exam_id__2009": 0.6521739130434783 - }, - "faquad_nli": { - "f1_macro,all": 0.5814184936920059, - "acc,all": 0.5938461538461538, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7940479868190712, - "acc,all": 0.8 - }, - "oab_exams": { - "acc,all": 0.420501138952164, - "acc,exam_id__2017-23": 0.4125, - "acc,exam_id__2016-20a": 0.4, - "acc,exam_id__2010-02": 0.47, - "acc,exam_id__2011-04": 0.375, - "acc,exam_id__2013-12": 0.45, - "acc,exam_id__2014-14": 0.475, - "acc,exam_id__2016-21": 0.4125, - "acc,exam_id__2012-06": 0.425, - "acc,exam_id__2011-05": 0.425, - "acc,exam_id__2015-18": 0.4875, - "acc,exam_id__2012-07": 0.425, - "acc,exam_id__2014-13": 0.45, - "acc,exam_id__2010-01": 0.2823529411764706, - "acc,exam_id__2016-19": 0.38461538461538464, - "acc,exam_id__2018-25": 0.3625, - "acc,exam_id__2011-03": 0.41414141414141414, - "acc,exam_id__2013-10": 0.425, - "acc,exam_id__2014-15": 0.46153846153846156, - "acc,exam_id__2012-06a": 0.4625, - "acc,exam_id__2012-08": 0.35, - "acc,exam_id__2013-11": 0.4125, - "acc,exam_id__2015-16": 0.5125, - "acc,exam_id__2012-09": 0.3116883116883117, - "acc,exam_id__2016-20": 0.4125, - "acc,exam_id__2017-22": 0.3875, - "acc,exam_id__2015-17": 0.48717948717948717, - "acc,exam_id__2017-24": 0.475, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.721159420289855, - 
"acc,all": 0.7555816686251469 - }, - "tweetsentbr": { - "f1_macro,all": 0.4960153896083912, - "acc,all": 0.6965174129353234, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9015343943160568, + "acc,all": 0.9015522875816994, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7850553241810303, + "mse,all": 0.6336784368014705, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5799721835883171, + "acc,exam_id__UNICAMP_2024": 0.6, + "acc,exam_id__USP_2018": 0.46296296296296297, + "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, + "acc,exam_id__UNICAMP_2018": 0.5, + "acc,exam_id__UNICAMP_2020": 0.5272727272727272, + "acc,exam_id__UNICAMP_2022": 0.6666666666666666, + "acc,exam_id__USP_2021": 0.5769230769230769, + "acc,exam_id__USP_2023": 0.7727272727272727, + "acc,exam_id__UNICAMP_2023": 0.5813953488372093, + "acc,exam_id__USP_2020": 0.5714285714285714, + "acc,exam_id__USP_2019": 0.55, + "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, + "acc,exam_id__USP_2024": 0.6341463414634146, + "acc,exam_id__USP_2022": 0.5918367346938775, + "acc,exam_id__UNICAMP_2019": 0.66, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6829951014695591, + "acc,exam_id__2013": 0.6388888888888888, + "acc,exam_id__2023": 0.6666666666666666, + "acc,exam_id__2015": 0.6890756302521008, + "acc,exam_id__2014": 0.7155963302752294, + "acc,exam_id__2016": 0.6363636363636364, + "acc,exam_id__2017": 0.6982758620689655, + "acc,exam_id__2010": 0.7008547008547008, + "acc,exam_id__2012": 0.7068965517241379, + "acc,exam_id__2011": 0.7264957264957265, + "acc,exam_id__2022": 0.6691729323308271, + "acc,exam_id__2016_2": 0.6991869918699187, + "acc,exam_id__2009": 0.6521739130434783 + }, + "faquad_nli": { + "f1_macro,all": 0.5814184936920059, + "acc,all": 0.5938461538461538, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7940479868190712, + "acc,all": 0.8 + }, + "oab_exams": { + "acc,all": 0.420501138952164, + "acc,exam_id__2017-23": 0.4125, + "acc,exam_id__2016-20a": 0.4, + "acc,exam_id__2010-02": 0.47, + "acc,exam_id__2011-04": 0.375, + "acc,exam_id__2013-12": 0.45, + "acc,exam_id__2014-14": 0.475, + 
"acc,exam_id__2016-21": 0.4125, + "acc,exam_id__2012-06": 0.425, + "acc,exam_id__2011-05": 0.425, + "acc,exam_id__2015-18": 0.4875, + "acc,exam_id__2012-07": 0.425, + "acc,exam_id__2014-13": 0.45, + "acc,exam_id__2010-01": 0.2823529411764706, + "acc,exam_id__2016-19": 0.38461538461538464, + "acc,exam_id__2018-25": 0.3625, + "acc,exam_id__2011-03": 0.41414141414141414, + "acc,exam_id__2013-10": 0.425, + "acc,exam_id__2014-15": 0.46153846153846156, + "acc,exam_id__2012-06a": 0.4625, + "acc,exam_id__2012-08": 0.35, + "acc,exam_id__2013-11": 0.4125, + "acc,exam_id__2015-16": 0.5125, + "acc,exam_id__2012-09": 0.3116883116883117, + "acc,exam_id__2016-20": 0.4125, + "acc,exam_id__2017-22": 0.3875, + "acc,exam_id__2015-17": 0.48717948717948717, + "acc,exam_id__2017-24": 0.475, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.721159420289855, + "acc,all": 0.7555816686251469 + }, + "tweetsentbr": { + "f1_macro,all": 0.6613538528111883, + "acc,all": 0.6965174129353234, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "2ccd940ac9e45da92ee1dd6f2f6452ace08c9997", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 42842973184, - "model_num_parameters": 21421479936, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 4, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1372.9162581699347, - "min_seq_length": 1349, - "max_seq_length": 1450, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1649.9162581699347, - "min_seq_length": 1626, - "max_seq_length": 1727, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1680.4075104311544, - "min_seq_length": 1305, - "max_seq_length": 2458, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 
1637.2841147655704, - "min_seq_length": 1367, - "max_seq_length": 2562, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1589.9215384615384, - "min_seq_length": 1536, - "max_seq_length": 1728, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "2ccd940ac9e45da92ee1dd6f2f6452ace08c9997", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 42842973184, + "model_num_parameters": 21421479936, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 4, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1215.1128571428571, - "min_seq_length": 1190, - "max_seq_length": 1475, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1419.90569476082, - "min_seq_length": 1138, - "max_seq_length": 1933, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1372.9162581699347, + "min_seq_length": 1349, + "max_seq_length": 1450, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1649.9162581699347, + "min_seq_length": 1626, + "max_seq_length": 1727, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1680.4075104311544, + "min_seq_length": 1305, + "max_seq_length": 2458, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1637.2841147655704, + "min_seq_length": 1367, + "max_seq_length": 2562, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1589.9215384615384, + "min_seq_length": 1536, + "max_seq_length": 1728, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1215.1128571428571, + "min_seq_length": 1190, + "max_seq_length": 1475, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1419.90569476082, + "min_seq_length": 1138, + "max_seq_length": 1933, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1674.6909518213865, + "min_seq_length": 1640, + "max_seq_length": 1709, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1446.6412935323383, + "min_seq_length": 1425, + "max_seq_length": 1559, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1674.6909518213865, - "min_seq_length": 1640, - "max_seq_length": 1709, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=saltlux/luxia-21.4b-alignment-v1.2,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1446.6412935323383, - "min_seq_length": 1425, - "max_seq_length": 1559, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=saltlux/luxia-21.4b-alignment-v1.2,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - 
"gen_kwargs": null - }, - "git_hash": "f2a0116" + "git_hash": "f2a0116" } \ No newline at end of file diff --git a/saltlux/luxia-21.4b-alignment-v1.2/results_2024-06-12T23-35-46.058025.json b/saltlux/luxia-21.4b-alignment-v1.2/results_2024-06-12T23-35-46.058025.json index 3f08efe00c52906d4ec98c98933a93c5d18e1a86..b4a32197a716f38e580f0f8d39af3e6ec7873012 100644 --- a/saltlux/luxia-21.4b-alignment-v1.2/results_2024-06-12T23-35-46.058025.json +++ b/saltlux/luxia-21.4b-alignment-v1.2/results_2024-06-12T23-35-46.058025.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6625221592129389, - "all_grouped_npm": 0.4900773770951115, + "all_grouped_average": 0.6808930995688052, + "all_grouped_npm": 0.5174150859580078, "all_grouped": { "enem_challenge": 0.6829951014695591, "bluex": 0.5799721835883171, @@ -45,7 +45,7 @@ "faquad_nli": 0.5814184936920059, "hatebr_offensive": 0.7940479868190712, "portuguese_hate_speech": 0.721159420289855, - "tweetsentbr": 0.4960153896083912 + "tweetsentbr": 0.6613538528111883 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6829951014695591, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.5814184936920059, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7940479868190712, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.721159420289855, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4960153896083912 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6613538528111883 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6829951014695591, @@ -150,9 +150,9 @@ "main_score": 0.721159420289855 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4960153896083912, + "f1_macro,all": 0.6613538528111883, "acc,all": 0.6965174129353234, - "main_score": 0.4960153896083912 + "main_score": 0.6613538528111883 } }, "config_tasks": { diff --git a/shadowml/BeagSake-7B/raw_2024-06-15T19-11-48.353198/results.json b/shadowml/BeagSake-7B/raw_2024-06-15T19-11-48.353198/results.json index 126aa0c47843044bf2c9043513af08507c3b06e3..32c3bab473df13f5dea6a98e9ce643d797dcd947 100644 --- a/shadowml/BeagSake-7B/raw_2024-06-15T19-11-48.353198/results.json +++ b/shadowml/BeagSake-7B/raw_2024-06-15T19-11-48.353198/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.46193313518767765, - "acc,all": 0.7066993464052288, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.5649979800601793, - "mse,all": 0.8163755196756205, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5118219749652295, - "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, - "acc,exam_id__UNICAMP_2023": 0.4883720930232558, - "acc,exam_id__UNICAMP_2020": 0.6363636363636364, - "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, - "acc,exam_id__USP_2021": 0.4423076923076923, - "acc,exam_id__USP_2024": 0.6829268292682927, - "acc,exam_id__UNICAMP_2018": 0.4444444444444444, - "acc,exam_id__UNICAMP_2019": 0.48, - "acc,exam_id__UNICAMP_2022": 0.5641025641025641, - "acc,exam_id__USP_2023": 0.5454545454545454, - "acc,exam_id__USP_2019": 0.45, - "acc,exam_id__UNICAMP_2024": 0.4666666666666667, - "acc,exam_id__USP_2020": 0.4642857142857143, - "acc,exam_id__USP_2018": 0.4444444444444444, - "acc,exam_id__USP_2022": 0.5510204081632653, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.5899230230930721, - "acc,exam_id__2014": 0.6055045871559633, - "acc,exam_id__2023": 0.6518518518518519, - "acc,exam_id__2012": 0.5431034482758621, - "acc,exam_id__2013": 
0.6111111111111112, - "acc,exam_id__2022": 0.5864661654135338, - "acc,exam_id__2015": 0.5714285714285714, - "acc,exam_id__2010": 0.5811965811965812, - "acc,exam_id__2016": 0.512396694214876, - "acc,exam_id__2009": 0.5826086956521739, - "acc,exam_id__2016_2": 0.5853658536585366, - "acc,exam_id__2011": 0.6324786324786325, - "acc,exam_id__2017": 0.6120689655172413 - }, - "faquad_nli": { - "f1_macro,all": 0.5768283186744451, - "acc,all": 0.7646153846153846, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.6577509670220215, - "acc,all": 0.6735714285714286 - }, - "oab_exams": { - "acc,all": 0.4054669703872437, - "acc,exam_id__2010-01": 0.35294117647058826, - "acc,exam_id__2012-06": 0.4375, - "acc,exam_id__2014-15": 0.48717948717948717, - "acc,exam_id__2017-22": 0.525, - "acc,exam_id__2016-21": 0.3375, - "acc,exam_id__2017-24": 0.35, - "acc,exam_id__2014-14": 0.475, - "acc,exam_id__2017-23": 0.4, - "acc,exam_id__2011-03": 0.3434343434343434, - "acc,exam_id__2015-17": 0.48717948717948717, - "acc,exam_id__2011-05": 0.425, - "acc,exam_id__2015-16": 0.35, - "acc,exam_id__2016-19": 0.44871794871794873, - "acc,exam_id__2012-09": 0.36363636363636365, - "acc,exam_id__2012-07": 0.375, - "acc,exam_id__2016-20a": 0.3375, - "acc,exam_id__2010-02": 0.41, - "acc,exam_id__2013-11": 0.4875, - "acc,exam_id__2014-13": 0.3375, - "acc,exam_id__2012-08": 0.3875, - "acc,exam_id__2018-25": 0.45, - "acc,exam_id__2013-10": 0.4, - "acc,exam_id__2012-06a": 0.4125, - "acc,exam_id__2013-12": 0.425, - "acc,exam_id__2016-20": 0.35, - "acc,exam_id__2015-18": 0.4125, - "acc,exam_id__2011-04": 0.4, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.5767432607181935, - "acc,all": 0.618096357226792 - }, - "tweetsentbr": { - "f1_macro,all": 0.4010702871344155, - "acc,all": 0.5611940298507463, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.6928997027815162, + "acc,all": 0.7066993464052288, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.5649979800601793, + "mse,all": 0.8163755196756205, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5118219749652295, + "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, + "acc,exam_id__UNICAMP_2023": 0.4883720930232558, + "acc,exam_id__UNICAMP_2020": 0.6363636363636364, + "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, + "acc,exam_id__USP_2021": 0.4423076923076923, + "acc,exam_id__USP_2024": 0.6829268292682927, + "acc,exam_id__UNICAMP_2018": 0.4444444444444444, + "acc,exam_id__UNICAMP_2019": 0.48, + "acc,exam_id__UNICAMP_2022": 0.5641025641025641, + "acc,exam_id__USP_2023": 0.5454545454545454, + "acc,exam_id__USP_2019": 0.45, + "acc,exam_id__UNICAMP_2024": 0.4666666666666667, + "acc,exam_id__USP_2020": 0.4642857142857143, + "acc,exam_id__USP_2018": 0.4444444444444444, + "acc,exam_id__USP_2022": 0.5510204081632653, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.5899230230930721, + "acc,exam_id__2014": 0.6055045871559633, + "acc,exam_id__2023": 0.6518518518518519, + "acc,exam_id__2012": 0.5431034482758621, + "acc,exam_id__2013": 0.6111111111111112, + "acc,exam_id__2022": 0.5864661654135338, + "acc,exam_id__2015": 0.5714285714285714, + "acc,exam_id__2010": 0.5811965811965812, + "acc,exam_id__2016": 0.512396694214876, + "acc,exam_id__2009": 0.5826086956521739, + "acc,exam_id__2016_2": 0.5853658536585366, + "acc,exam_id__2011": 0.6324786324786325, + "acc,exam_id__2017": 0.6120689655172413 + }, + "faquad_nli": { + "f1_macro,all": 0.5768283186744451, + "acc,all": 0.7646153846153846, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.6577509670220215, + "acc,all": 0.6735714285714286 + }, + "oab_exams": { + "acc,all": 0.4054669703872437, + "acc,exam_id__2010-01": 0.35294117647058826, + "acc,exam_id__2012-06": 0.4375, + "acc,exam_id__2014-15": 0.48717948717948717, + "acc,exam_id__2017-22": 0.525, + "acc,exam_id__2016-21": 0.3375, + "acc,exam_id__2017-24": 0.35, + "acc,exam_id__2014-14": 0.475, + "acc,exam_id__2017-23": 0.4, + "acc,exam_id__2011-03": 0.3434343434343434, + "acc,exam_id__2015-17": 0.48717948717948717, + "acc,exam_id__2011-05": 0.425, + "acc,exam_id__2015-16": 0.35, + "acc,exam_id__2016-19": 0.44871794871794873, + "acc,exam_id__2012-09": 0.36363636363636365, + "acc,exam_id__2012-07": 0.375, + "acc,exam_id__2016-20a": 0.3375, + "acc,exam_id__2010-02": 0.41, + "acc,exam_id__2013-11": 0.4875, + "acc,exam_id__2014-13": 0.3375, + "acc,exam_id__2012-08": 0.3875, + "acc,exam_id__2018-25": 
0.45, + "acc,exam_id__2013-10": 0.4, + "acc,exam_id__2012-06a": 0.4125, + "acc,exam_id__2013-12": 0.425, + "acc,exam_id__2016-20": 0.35, + "acc,exam_id__2015-18": 0.4125, + "acc,exam_id__2011-04": 0.4, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.5767432607181935, + "acc,all": 0.618096357226792 + }, + "tweetsentbr": { + "f1_macro,all": 0.5347603828458874, + "acc,all": 0.5611940298507463, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "b7a3b25a188a4608fd05fc4247ddd504c1f529d1", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 15020343296, - "model_num_parameters": 7241732096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1531.7455065359477, - "min_seq_length": 1508, - "max_seq_length": 1598, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1740.7455065359477, - "min_seq_length": 1717, - "max_seq_length": 1807, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1761.9262865090404, - "min_seq_length": 1385, - "max_seq_length": 2562, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1662.039188243527, - "min_seq_length": 1396, - "max_seq_length": 2660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1756.9876923076922, - "min_seq_length": 1701, - "max_seq_length": 1877, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "b7a3b25a188a4608fd05fc4247ddd504c1f529d1", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 15020343296, + "model_num_parameters": 7241732096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1567.3878571428572, - "min_seq_length": 1544, - "max_seq_length": 1818, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1407.764464692483, - "min_seq_length": 1141, - "max_seq_length": 1910, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1531.7455065359477, + "min_seq_length": 1508, + "max_seq_length": 1598, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1740.7455065359477, + "min_seq_length": 1717, + "max_seq_length": 1807, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1761.9262865090404, + "min_seq_length": 1385, + "max_seq_length": 2562, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1662.039188243527, + "min_seq_length": 1396, + "max_seq_length": 
2660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1756.9876923076922, + "min_seq_length": 1701, + "max_seq_length": 1877, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1567.3878571428572, + "min_seq_length": 1544, + "max_seq_length": 1818, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1407.764464692483, + "min_seq_length": 1141, + "max_seq_length": 1910, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2068.3360752056406, + "min_seq_length": 2033, + "max_seq_length": 2107, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1814.2492537313433, + "min_seq_length": 1793, + "max_seq_length": 1909, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2068.3360752056406, - "min_seq_length": 2033, - "max_seq_length": 2107, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=shadowml/BeagSake-7B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1814.2492537313433, - "min_seq_length": 1793, - "max_seq_length": 1909, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=shadowml/BeagSake-7B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - 
"gen_kwargs": null - }, - "git_hash": "2d67fba" + "git_hash": "2d67fba" } \ No newline at end of file diff --git a/shadowml/BeagSake-7B/results_2024-06-15T19-11-48.353198.json b/shadowml/BeagSake-7B/results_2024-06-15T19-11-48.353198.json index 12a9270f6707e294a18bbf29d5e985548122a2b0..cd8ada656a9a729c4321252b8183f2e8fc3df49f 100644 --- a/shadowml/BeagSake-7B/results_2024-06-15T19-11-48.353198.json +++ b/shadowml/BeagSake-7B/results_2024-06-15T19-11-48.353198.json @@ -34,29 +34,29 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.5273928796936087, - "all_grouped_npm": 0.2652895089662437, + "all_grouped_average": 0.5679102867275321, + "all_grouped_npm": 0.33872025674362316, "all_grouped": { "enem_challenge": 0.5899230230930721, "bluex": 0.5118219749652295, "oab_exams": 0.4054669703872437, - "assin2_rte": 0.46193313518767765, + "assin2_rte": 0.6928997027815162, "assin2_sts": 0.5649979800601793, "faquad_nli": 0.5768283186744451, "hatebr_offensive": 0.6577509670220215, "portuguese_hate_speech": 0.5767432607181935, - "tweetsentbr": 0.4010702871344155 + "tweetsentbr": 0.5347603828458874 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.5899230230930721, "harness|bluex|bluex|None|3": 0.5118219749652295, "harness|oab_exams|oab_exams|None|3": 0.4054669703872437, - "harness|assin2_rte|assin2_rte|None|15": 0.46193313518767765, + "harness|assin2_rte|assin2_rte|None|15": 0.6928997027815162, "harness|assin2_sts|assin2_sts|None|15": 0.5649979800601793, "harness|faquad_nli|faquad_nli|None|15": 0.5768283186744451, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.6577509670220215, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.5767432607181935, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4010702871344155 + "harness|tweetsentbr|tweetsentbr|None|25": 0.5347603828458874 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.5899230230930721, @@ -125,9 +125,9 @@ "main_score": 0.4054669703872437 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.46193313518767765, + "f1_macro,all": 0.6928997027815162, "acc,all": 0.7066993464052288, - "main_score": 0.46193313518767765 + "main_score": 0.6928997027815162 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0.5649979800601793, @@ -150,9 +150,9 @@ "main_score": 0.5767432607181935 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4010702871344155, + "f1_macro,all": 0.5347603828458874, "acc,all": 0.5611940298507463, - "main_score": 0.4010702871344155 + "main_score": 0.5347603828458874 } }, "config_tasks": { diff --git a/ssmits/Falcon2-5.5B-Portuguese/raw_2024-05-28T14-17-43.082234/results.json b/ssmits/Falcon2-5.5B-Portuguese/raw_2024-05-28T14-17-43.082234/results.json index 52de15cbe01d7127e82c71a9f2f671678ac23a2f..97472c062287ca647868e1137d1bc0b87704b09b 100644 --- a/ssmits/Falcon2-5.5B-Portuguese/raw_2024-05-28T14-17-43.082234/results.json +++ b/ssmits/Falcon2-5.5B-Portuguese/raw_2024-05-28T14-17-43.082234/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.0005437737901033171, - "acc,all": 0.0004084967320261438, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0, - "mse,all": 3.062949346405229, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.008344923504867872, - "acc,exam_id__UNICAMP_2018": 0.0, - "acc,exam_id__UNICAMP_2023": 0.0, - "acc,exam_id__USP_2023": 0.0, - "acc,exam_id__UNICAMP_2024": 0.0, - "acc,exam_id__USP_2024": 0.0, - "acc,exam_id__UNICAMP_2021_1": 0.0, - "acc,exam_id__USP_2020": 
0.0, - "acc,exam_id__UNICAMP_2020": 0.03636363636363636, - "acc,exam_id__UNICAMP_2022": 0.0, - "acc,exam_id__UNICAMP_2019": 0.0, - "acc,exam_id__UNICAMP_2021_2": 0.0196078431372549, - "acc,exam_id__USP_2018": 0.037037037037037035, - "acc,exam_id__USP_2021": 0.019230769230769232, - "acc,exam_id__USP_2019": 0.0, - "acc,exam_id__USP_2022": 0.0, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.006298110566829951, - "acc,exam_id__2011": 0.0, - "acc,exam_id__2017": 0.017241379310344827, - "acc,exam_id__2015": 0.008403361344537815, - "acc,exam_id__2016": 0.01652892561983471, - "acc,exam_id__2016_2": 0.008130081300813009, - "acc,exam_id__2009": 0.008695652173913044, - "acc,exam_id__2012": 0.008620689655172414, - "acc,exam_id__2010": 0.0, - "acc,exam_id__2013": 0.0, - "acc,exam_id__2014": 0.009174311926605505, - "acc,exam_id__2022": 0.0, - "acc,exam_id__2023": 0.0 - }, - "faquad_nli": { - "f1_macro,all": 0.029411764705882356, - "acc,all": 0.036923076923076927, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.0, - "acc,all": 0.0 - }, - "oab_exams": { - "acc,all": 0.004555808656036446, - "acc,exam_id__2011-03": 0.0, - "acc,exam_id__2014-13": 0.0, - "acc,exam_id__2013-10": 0.0, - "acc,exam_id__2017-24": 0.0, - "acc,exam_id__2017-22": 0.0, - "acc,exam_id__2012-06a": 0.0, - "acc,exam_id__2016-20a": 0.0125, - "acc,exam_id__2012-09": 0.012987012987012988, - "acc,exam_id__2015-16": 0.0, - "acc,exam_id__2011-04": 0.0125, - "acc,exam_id__2012-07": 0.0, - "acc,exam_id__2014-14": 0.025, - "acc,exam_id__2014-15": 0.0, - "acc,exam_id__2010-02": 0.0, - "acc,exam_id__2015-18": 0.0, - "acc,exam_id__2016-19": 0.01282051282051282, - "acc,exam_id__2012-06": 0.025, - "acc,exam_id__2013-12": 0.0, - "acc,exam_id__2011-05": 0.0, - "acc,exam_id__2017-23": 0.0, - "acc,exam_id__2013-11": 0.0, - "acc,exam_id__2016-20": 0.0, - "acc,exam_id__2016-21": 0.0, - "acc,exam_id__2018-25": 0.0, - "acc,exam_id__2010-01": 0.011764705882352941, - "acc,exam_id__2012-08": 0.0125, - "acc,exam_id__2015-17": 0.0, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.0011129660545353367, - "acc,all": 0.0011750881316098707 - }, - "tweetsentbr": { - "f1_macro,all": 0.0, - "acc,all": 0.0, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.0008156606851549756, + "acc,all": 0.0004084967320261438, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0, + "mse,all": 3.062949346405229, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.008344923504867872, + "acc,exam_id__UNICAMP_2018": 0.0, + "acc,exam_id__UNICAMP_2023": 0.0, + "acc,exam_id__USP_2023": 0.0, + "acc,exam_id__UNICAMP_2024": 0.0, + "acc,exam_id__USP_2024": 0.0, + "acc,exam_id__UNICAMP_2021_1": 0.0, + "acc,exam_id__USP_2020": 0.0, + "acc,exam_id__UNICAMP_2020": 0.03636363636363636, + "acc,exam_id__UNICAMP_2022": 0.0, + "acc,exam_id__UNICAMP_2019": 0.0, + "acc,exam_id__UNICAMP_2021_2": 0.0196078431372549, + "acc,exam_id__USP_2018": 0.037037037037037035, + "acc,exam_id__USP_2021": 0.019230769230769232, + "acc,exam_id__USP_2019": 0.0, + "acc,exam_id__USP_2022": 0.0, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.006298110566829951, + "acc,exam_id__2011": 0.0, + "acc,exam_id__2017": 0.017241379310344827, + "acc,exam_id__2015": 0.008403361344537815, + "acc,exam_id__2016": 0.01652892561983471, + "acc,exam_id__2016_2": 0.008130081300813009, + "acc,exam_id__2009": 0.008695652173913044, + "acc,exam_id__2012": 0.008620689655172414, + "acc,exam_id__2010": 0.0, + "acc,exam_id__2013": 0.0, + "acc,exam_id__2014": 0.009174311926605505, + "acc,exam_id__2022": 0.0, + "acc,exam_id__2023": 0.0 + }, + "faquad_nli": { + "f1_macro,all": 0.04411764705882353, + "acc,all": 0.036923076923076927, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.0, + "acc,all": 0.0 + }, + "oab_exams": { + "acc,all": 0.004555808656036446, + "acc,exam_id__2011-03": 0.0, + "acc,exam_id__2014-13": 0.0, + "acc,exam_id__2013-10": 0.0, + "acc,exam_id__2017-24": 0.0, + "acc,exam_id__2017-22": 0.0, + "acc,exam_id__2012-06a": 0.0, + "acc,exam_id__2016-20a": 0.0125, + "acc,exam_id__2012-09": 0.012987012987012988, + "acc,exam_id__2015-16": 0.0, + "acc,exam_id__2011-04": 0.0125, + "acc,exam_id__2012-07": 0.0, + "acc,exam_id__2014-14": 0.025, + "acc,exam_id__2014-15": 0.0, + "acc,exam_id__2010-02": 0.0, + "acc,exam_id__2015-18": 0.0, + "acc,exam_id__2016-19": 0.01282051282051282, + "acc,exam_id__2012-06": 0.025, + "acc,exam_id__2013-12": 0.0, + "acc,exam_id__2011-05": 0.0, + "acc,exam_id__2017-23": 0.0, + "acc,exam_id__2013-11": 0.0, + "acc,exam_id__2016-20": 0.0, + "acc,exam_id__2016-21": 0.0, + "acc,exam_id__2018-25": 0.0, + "acc,exam_id__2010-01": 0.011764705882352941, + "acc,exam_id__2012-08": 0.0125, + "acc,exam_id__2015-17": 0.0, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": 
"portuguese_hate_speech_binary", + "f1_macro,all": 0.0016694490818030048, + "acc,all": 0.0011750881316098707 + }, + "tweetsentbr": { + "f1_macro,all": 0.0, + "acc,all": 0.0, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "917057d3ed9647249ddc21ee9311550493ac653d", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 11048279040, - "model_num_parameters": 5465415680, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1406.938725490196, - "min_seq_length": 1384, - "max_seq_length": 1474, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1598.938725490196, - "min_seq_length": 1576, - "max_seq_length": 1666, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1604.2573018080668, - "min_seq_length": 1263, - "max_seq_length": 2307, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1499.3505948215536, - "min_seq_length": 1264, - "max_seq_length": 2497, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1561.2353846153846, - "min_seq_length": 1514, - "max_seq_length": 1658, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "917057d3ed9647249ddc21ee9311550493ac653d", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 11048279040, + "model_num_parameters": 5465415680, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1398.4871428571428, - "min_seq_length": 1378, - "max_seq_length": 1630, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1283.3835990888383, - "min_seq_length": 1043, - "max_seq_length": 1724, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1406.938725490196, + "min_seq_length": 1384, + "max_seq_length": 1474, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1598.938725490196, + "min_seq_length": 1576, + "max_seq_length": 1666, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1604.2573018080668, + "min_seq_length": 1263, + "max_seq_length": 2307, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1499.3505948215536, + "min_seq_length": 1264, + "max_seq_length": 2497, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1561.2353846153846, + "min_seq_length": 1514, + "max_seq_length": 1658, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1398.4871428571428, + "min_seq_length": 1378, + "max_seq_length": 1630, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1283.3835990888383, + "min_seq_length": 1043, + "max_seq_length": 1724, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1911.7309048178613, + "min_seq_length": 1878, + "max_seq_length": 1948, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1638.6781094527364, + "min_seq_length": 1620, + "max_seq_length": 1691, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1911.7309048178613, - "min_seq_length": 1878, - "max_seq_length": 1948, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=ssmits/Falcon2-5.5B-Portuguese,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1638.6781094527364, - "min_seq_length": 1620, - "max_seq_length": 1691, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=ssmits/Falcon2-5.5B-Portuguese,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - 
"git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/ssmits/Falcon2-5.5B-Portuguese/results_2024-05-28T14-17-43.082234.json b/ssmits/Falcon2-5.5B-Portuguese/results_2024-05-28T14-17-43.082234.json index ec3582e2a78c491d330b2679fb49b076aa0f956b..e2e47109e3b70438ece782e75e9eda94badc9327 100644 --- a/ssmits/Falcon2-5.5B-Portuguese/results_2024-05-28T14-17-43.082234.json +++ b/ssmits/Falcon2-5.5B-Portuguese/results_2024-05-28T14-17-43.082234.json @@ -34,28 +34,28 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.005585260808695031, - "all_grouped_npm": -0.559707560776947, + "all_grouped_average": 0.00731128883927953, + "all_grouped_npm": -0.5565248106235051, "all_grouped": { "enem_challenge": 0.006298110566829951, "bluex": 0.008344923504867872, "oab_exams": 0.004555808656036446, - "assin2_rte": 0.0005437737901033171, + "assin2_rte": 0.0008156606851549756, "assin2_sts": 0.0, - "faquad_nli": 0.029411764705882356, + "faquad_nli": 0.04411764705882353, "hatebr_offensive": 0.0, - "portuguese_hate_speech": 0.0011129660545353367, + "portuguese_hate_speech": 0.0016694490818030048, "tweetsentbr": 0.0 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.006298110566829951, "harness|bluex|bluex|None|3": 0.008344923504867872, "harness|oab_exams|oab_exams|None|3": 0.004555808656036446, - "harness|assin2_rte|assin2_rte|None|15": 0.0005437737901033171, + "harness|assin2_rte|assin2_rte|None|15": 0.0008156606851549756, "harness|assin2_sts|assin2_sts|None|15": 0, - "harness|faquad_nli|faquad_nli|None|15": 0.029411764705882356, + "harness|faquad_nli|faquad_nli|None|15": 0.04411764705882353, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.0, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.0011129660545353367, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.0016694490818030048, "harness|tweetsentbr|tweetsentbr|None|25": 0.0 }, "harness|enem_challenge|enem_challenge|None|3": { @@ -125,9 +125,9 @@ "main_score": 0.004555808656036446 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.0005437737901033171, + "f1_macro,all": 0.0008156606851549756, "acc,all": 0.0004084967320261438, - "main_score": 0.0005437737901033171 + "main_score": 0.0008156606851549756 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0, @@ -135,9 +135,9 @@ "main_score": 0 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.029411764705882356, + "f1_macro,all": 0.04411764705882353, "acc,all": 0.036923076923076927, - "main_score": 0.029411764705882356 + "main_score": 0.04411764705882353 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { "f1_macro,all": 0.0, @@ -145,9 +145,9 @@ "main_score": 0.0 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.0011129660545353367, + "f1_macro,all": 0.0016694490818030048, "acc,all": 0.0011750881316098707, - "main_score": 0.0011129660545353367 + "main_score": 0.0016694490818030048 }, "harness|tweetsentbr|tweetsentbr|None|25": { "f1_macro,all": 0.0, diff --git a/ssmits/Falcon2-5.5B-multilingual/raw_2024-05-28T17-48-59.310480/results.json b/ssmits/Falcon2-5.5B-multilingual/raw_2024-05-28T17-48-59.310480/results.json index 3de453cb8cb0011914c96addc4bfa9f361a2f0a1..9f7791cc1cfc6ad69f1f1a9e58ec1c14189fdd5b 100644 --- a/ssmits/Falcon2-5.5B-multilingual/raw_2024-05-28T17-48-59.310480/results.json +++ b/ssmits/Falcon2-5.5B-multilingual/raw_2024-05-28T17-48-59.310480/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": 
{ - "f1_macro,all": 0.0005437737901033171, - "acc,all": 0.0004084967320261438, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0, - "mse,all": 3.062949346405229, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.008344923504867872, - "acc,exam_id__UNICAMP_2018": 0.0, - "acc,exam_id__UNICAMP_2023": 0.0, - "acc,exam_id__USP_2023": 0.0, - "acc,exam_id__UNICAMP_2024": 0.0, - "acc,exam_id__USP_2024": 0.0, - "acc,exam_id__UNICAMP_2021_1": 0.0, - "acc,exam_id__USP_2020": 0.0, - "acc,exam_id__UNICAMP_2020": 0.03636363636363636, - "acc,exam_id__UNICAMP_2022": 0.0, - "acc,exam_id__UNICAMP_2019": 0.0, - "acc,exam_id__UNICAMP_2021_2": 0.0196078431372549, - "acc,exam_id__USP_2018": 0.037037037037037035, - "acc,exam_id__USP_2021": 0.019230769230769232, - "acc,exam_id__USP_2019": 0.0, - "acc,exam_id__USP_2022": 0.0, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.006298110566829951, - "acc,exam_id__2011": 0.0, - "acc,exam_id__2017": 0.017241379310344827, - "acc,exam_id__2015": 0.008403361344537815, - "acc,exam_id__2016": 0.01652892561983471, - "acc,exam_id__2016_2": 0.008130081300813009, - "acc,exam_id__2009": 0.008695652173913044, - "acc,exam_id__2012": 0.008620689655172414, - "acc,exam_id__2010": 0.0, - "acc,exam_id__2013": 0.0, - "acc,exam_id__2014": 0.009174311926605505, - "acc,exam_id__2022": 0.0, - "acc,exam_id__2023": 0.0 - }, - "faquad_nli": { - "f1_macro,all": 0.029411764705882356, - "acc,all": 0.036923076923076927, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.0, - "acc,all": 0.0 - }, - "oab_exams": { - "acc,all": 0.004555808656036446, - "acc,exam_id__2011-03": 0.0, - "acc,exam_id__2014-13": 0.0, - "acc,exam_id__2013-10": 0.0, - "acc,exam_id__2017-24": 0.0, - "acc,exam_id__2017-22": 0.0, - "acc,exam_id__2012-06a": 0.0, - "acc,exam_id__2016-20a": 0.0125, - "acc,exam_id__2012-09": 0.012987012987012988, - "acc,exam_id__2015-16": 0.0, - "acc,exam_id__2011-04": 0.0125, - "acc,exam_id__2012-07": 0.0, - "acc,exam_id__2014-14": 0.025, - "acc,exam_id__2014-15": 0.0, - "acc,exam_id__2010-02": 0.0, - "acc,exam_id__2015-18": 0.0, - "acc,exam_id__2016-19": 0.01282051282051282, - "acc,exam_id__2012-06": 0.025, - "acc,exam_id__2013-12": 0.0, - "acc,exam_id__2011-05": 0.0, - "acc,exam_id__2017-23": 0.0, - "acc,exam_id__2013-11": 0.0, - "acc,exam_id__2016-20": 0.0, - "acc,exam_id__2016-21": 0.0, - "acc,exam_id__2018-25": 0.0, - "acc,exam_id__2010-01": 0.011764705882352941, - "acc,exam_id__2012-08": 0.0125, - "acc,exam_id__2015-17": 0.0, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.0011129660545353367, - "acc,all": 0.0011750881316098707 - }, - "tweetsentbr": { - "f1_macro,all": 0.0, - "acc,all": 0.0, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.0008156606851549756, + "acc,all": 0.0004084967320261438, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0, + "mse,all": 3.062949346405229, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.008344923504867872, + "acc,exam_id__UNICAMP_2018": 0.0, + "acc,exam_id__UNICAMP_2023": 0.0, + "acc,exam_id__USP_2023": 0.0, + "acc,exam_id__UNICAMP_2024": 0.0, + "acc,exam_id__USP_2024": 0.0, + "acc,exam_id__UNICAMP_2021_1": 0.0, + "acc,exam_id__USP_2020": 0.0, + "acc,exam_id__UNICAMP_2020": 0.03636363636363636, + "acc,exam_id__UNICAMP_2022": 0.0, + "acc,exam_id__UNICAMP_2019": 0.0, + "acc,exam_id__UNICAMP_2021_2": 0.0196078431372549, + "acc,exam_id__USP_2018": 0.037037037037037035, + "acc,exam_id__USP_2021": 0.019230769230769232, + "acc,exam_id__USP_2019": 0.0, + "acc,exam_id__USP_2022": 0.0, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.006298110566829951, + "acc,exam_id__2011": 0.0, + "acc,exam_id__2017": 0.017241379310344827, + "acc,exam_id__2015": 0.008403361344537815, + "acc,exam_id__2016": 0.01652892561983471, + "acc,exam_id__2016_2": 0.008130081300813009, + "acc,exam_id__2009": 0.008695652173913044, + "acc,exam_id__2012": 0.008620689655172414, + "acc,exam_id__2010": 0.0, + "acc,exam_id__2013": 0.0, + "acc,exam_id__2014": 0.009174311926605505, + "acc,exam_id__2022": 0.0, + "acc,exam_id__2023": 0.0 + }, + "faquad_nli": { + "f1_macro,all": 0.04411764705882353, + "acc,all": 0.036923076923076927, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.0, + "acc,all": 0.0 + }, + "oab_exams": { + "acc,all": 0.004555808656036446, + "acc,exam_id__2011-03": 0.0, + "acc,exam_id__2014-13": 0.0, + "acc,exam_id__2013-10": 0.0, + "acc,exam_id__2017-24": 0.0, + "acc,exam_id__2017-22": 0.0, + "acc,exam_id__2012-06a": 0.0, + "acc,exam_id__2016-20a": 0.0125, + "acc,exam_id__2012-09": 0.012987012987012988, + "acc,exam_id__2015-16": 0.0, + "acc,exam_id__2011-04": 0.0125, + "acc,exam_id__2012-07": 0.0, + "acc,exam_id__2014-14": 0.025, + "acc,exam_id__2014-15": 0.0, + "acc,exam_id__2010-02": 0.0, + "acc,exam_id__2015-18": 0.0, + "acc,exam_id__2016-19": 0.01282051282051282, + "acc,exam_id__2012-06": 0.025, + "acc,exam_id__2013-12": 0.0, + "acc,exam_id__2011-05": 0.0, + "acc,exam_id__2017-23": 0.0, + "acc,exam_id__2013-11": 0.0, + "acc,exam_id__2016-20": 0.0, + "acc,exam_id__2016-21": 0.0, + "acc,exam_id__2018-25": 0.0, + "acc,exam_id__2010-01": 0.011764705882352941, + "acc,exam_id__2012-08": 0.0125, + "acc,exam_id__2015-17": 0.0, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": 
"portuguese_hate_speech_binary", + "f1_macro,all": 0.0016694490818030048, + "acc,all": 0.0011750881316098707 + }, + "tweetsentbr": { + "f1_macro,all": 0.0, + "acc,all": 0.0, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "e599f82e3d3b0d73f2f3515346f9b855e7b1b6fa", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 11048279040, - "model_num_parameters": 5465415680, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1406.938725490196, - "min_seq_length": 1384, - "max_seq_length": 1474, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1598.938725490196, - "min_seq_length": 1576, - "max_seq_length": 1666, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1604.2573018080668, - "min_seq_length": 1263, - "max_seq_length": 2307, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1499.3505948215536, - "min_seq_length": 1264, - "max_seq_length": 2497, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1561.2353846153846, - "min_seq_length": 1514, - "max_seq_length": 1658, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "e599f82e3d3b0d73f2f3515346f9b855e7b1b6fa", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 11048279040, + "model_num_parameters": 5465415680, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1398.4871428571428, - "min_seq_length": 1378, - "max_seq_length": 1630, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1283.3835990888383, - "min_seq_length": 1043, - "max_seq_length": 1724, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1406.938725490196, + "min_seq_length": 1384, + "max_seq_length": 1474, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1598.938725490196, + "min_seq_length": 1576, + "max_seq_length": 1666, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1604.2573018080668, + "min_seq_length": 1263, + "max_seq_length": 2307, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1499.3505948215536, + "min_seq_length": 1264, + "max_seq_length": 2497, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1561.2353846153846, + "min_seq_length": 1514, + "max_seq_length": 1658, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1398.4871428571428, + "min_seq_length": 1378, + "max_seq_length": 1630, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1283.3835990888383, + "min_seq_length": 1043, + "max_seq_length": 1724, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1911.7309048178613, + "min_seq_length": 1878, + "max_seq_length": 1948, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1638.6781094527364, + "min_seq_length": 1620, + "max_seq_length": 1691, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1911.7309048178613, - "min_seq_length": 1878, - "max_seq_length": 1948, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=ssmits/Falcon2-5.5B-multilingual,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1638.6781094527364, - "min_seq_length": 1620, - "max_seq_length": 1691, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=ssmits/Falcon2-5.5B-multilingual,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - 
"git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/ssmits/Falcon2-5.5B-multilingual/results_2024-05-28T17-48-59.310480.json b/ssmits/Falcon2-5.5B-multilingual/results_2024-05-28T17-48-59.310480.json index e6cb90869d33dada903ed5ffef417049175a628d..f8f2a5073e8b60e6161cf5f5df9b288124680c2f 100644 --- a/ssmits/Falcon2-5.5B-multilingual/results_2024-05-28T17-48-59.310480.json +++ b/ssmits/Falcon2-5.5B-multilingual/results_2024-05-28T17-48-59.310480.json @@ -34,28 +34,28 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.005585260808695031, - "all_grouped_npm": -0.559707560776947, + "all_grouped_average": 0.00731128883927953, + "all_grouped_npm": -0.5565248106235051, "all_grouped": { "enem_challenge": 0.006298110566829951, "bluex": 0.008344923504867872, "oab_exams": 0.004555808656036446, - "assin2_rte": 0.0005437737901033171, + "assin2_rte": 0.0008156606851549756, "assin2_sts": 0.0, - "faquad_nli": 0.029411764705882356, + "faquad_nli": 0.04411764705882353, "hatebr_offensive": 0.0, - "portuguese_hate_speech": 0.0011129660545353367, + "portuguese_hate_speech": 0.0016694490818030048, "tweetsentbr": 0.0 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.006298110566829951, "harness|bluex|bluex|None|3": 0.008344923504867872, "harness|oab_exams|oab_exams|None|3": 0.004555808656036446, - "harness|assin2_rte|assin2_rte|None|15": 0.0005437737901033171, + "harness|assin2_rte|assin2_rte|None|15": 0.0008156606851549756, "harness|assin2_sts|assin2_sts|None|15": 0, - "harness|faquad_nli|faquad_nli|None|15": 0.029411764705882356, + "harness|faquad_nli|faquad_nli|None|15": 0.04411764705882353, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.0, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.0011129660545353367, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.0016694490818030048, "harness|tweetsentbr|tweetsentbr|None|25": 0.0 }, "harness|enem_challenge|enem_challenge|None|3": { @@ -125,9 +125,9 @@ "main_score": 0.004555808656036446 }, "harness|assin2_rte|assin2_rte|None|15": { - "f1_macro,all": 0.0005437737901033171, + "f1_macro,all": 0.0008156606851549756, "acc,all": 0.0004084967320261438, - "main_score": 0.0005437737901033171 + "main_score": 0.0008156606851549756 }, "harness|assin2_sts|assin2_sts|None|15": { "pearson,all": 0, @@ -135,9 +135,9 @@ "main_score": 0 }, "harness|faquad_nli|faquad_nli|None|15": { - "f1_macro,all": 0.029411764705882356, + "f1_macro,all": 0.04411764705882353, "acc,all": 0.036923076923076927, - "main_score": 0.029411764705882356 + "main_score": 0.04411764705882353 }, "harness|hatebr_offensive|hatebr_offensive|None|25": { "f1_macro,all": 0.0, @@ -145,9 +145,9 @@ "main_score": 0.0 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.0011129660545353367, + "f1_macro,all": 0.0016694490818030048, "acc,all": 0.0011750881316098707, - "main_score": 0.0011129660545353367 + "main_score": 0.0016694490818030048 }, "harness|tweetsentbr|tweetsentbr|None|25": { "f1_macro,all": 0.0, diff --git a/state-spaces/mamba-1.4b-hf/raw_2024-04-18T23-42-19.165668/results.json b/state-spaces/mamba-1.4b-hf/raw_2024-04-18T23-42-19.165668/results.json index 4549c81d7f8822ef9001f78680b347133db241e2..48f4b86f8af79c9e930a317c7667e12eb67164e1 100644 --- a/state-spaces/mamba-1.4b-hf/raw_2024-04-18T23-42-19.165668/results.json +++ b/state-spaces/mamba-1.4b-hf/raw_2024-04-18T23-42-19.165668/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - 
"f1_macro,all": 0.3428635121469901, - "acc,all": 0.5036764705882353, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.012831311142992859, - "mse,all": 2.4420179738562093, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.20305980528511822, - "acc,exam_id__USP_2023": 0.11363636363636363, - "acc,exam_id__UNICAMP_2021_1": 0.2608695652173913, - "acc,exam_id__UNICAMP_2024": 0.2222222222222222, - "acc,exam_id__USP_2020": 0.21428571428571427, - "acc,exam_id__UNICAMP_2018": 0.24074074074074073, - "acc,exam_id__USP_2019": 0.275, - "acc,exam_id__USP_2021": 0.21153846153846154, - "acc,exam_id__USP_2024": 0.12195121951219512, - "acc,exam_id__UNICAMP_2019": 0.18, - "acc,exam_id__UNICAMP_2020": 0.2, - "acc,exam_id__USP_2022": 0.20408163265306123, - "acc,exam_id__UNICAMP_2023": 0.3023255813953488, - "acc,exam_id__UNICAMP_2022": 0.28205128205128205, - "acc,exam_id__USP_2018": 0.12962962962962962, - "acc,exam_id__UNICAMP_2021_2": 0.11764705882352941, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.2099370188943317, - "acc,exam_id__2017": 0.22413793103448276, - "acc,exam_id__2014": 0.2018348623853211, - "acc,exam_id__2016": 0.2066115702479339, - "acc,exam_id__2013": 0.17592592592592593, - "acc,exam_id__2022": 0.24060150375939848, - "acc,exam_id__2023": 0.2962962962962963, - "acc,exam_id__2015": 0.15966386554621848, - "acc,exam_id__2010": 0.1794871794871795, - "acc,exam_id__2016_2": 0.21951219512195122, - "acc,exam_id__2009": 0.16521739130434782, - "acc,exam_id__2012": 0.21551724137931033, - "acc,exam_id__2011": 0.21367521367521367 - }, - "faquad_nli": { - "f1_macro,all": 0.4396551724137931, - "acc,all": 0.7846153846153846, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.4675345929636195, - "acc,all": 0.5285714285714286 - }, - "oab_exams": { - "acc,all": 0.2296127562642369, - "acc,exam_id__2018-25": 0.2875, - "acc,exam_id__2015-16": 0.2375, - "acc,exam_id__2012-06a": 0.2375, - "acc,exam_id__2016-21": 0.2125, - "acc,exam_id__2013-11": 0.1625, - "acc,exam_id__2012-07": 0.1375, - "acc,exam_id__2016-20a": 0.3, - "acc,exam_id__2015-17": 0.24358974358974358, - "acc,exam_id__2015-18": 0.25, - "acc,exam_id__2014-13": 0.2375, - "acc,exam_id__2011-05": 0.2375, - "acc,exam_id__2017-22": 0.25, - "acc,exam_id__2012-09": 0.23376623376623376, - "acc,exam_id__2013-12": 0.1625, - "acc,exam_id__2016-20": 0.225, - "acc,exam_id__2010-02": 0.25, - "acc,exam_id__2014-14": 0.2625, - "acc,exam_id__2012-06": 0.2375, - "acc,exam_id__2017-23": 0.2, - "acc,exam_id__2013-10": 0.2125, - "acc,exam_id__2017-24": 0.225, - "acc,exam_id__2010-01": 0.25882352941176473, - "acc,exam_id__2011-03": 0.24242424242424243, - "acc,exam_id__2011-04": 0.25, - "acc,exam_id__2014-15": 0.21794871794871795, - "acc,exam_id__2016-19": 0.19230769230769232, - "acc,exam_id__2012-08": 0.225, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.3427493138151876, - "acc,all": 0.5534665099882491 - }, - "tweetsentbr": { - "f1_macro,all": 0.2464771462735259, - "acc,all": 0.2855721393034826, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? 
Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.3428635121469901, + "acc,all": 0.5036764705882353, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.012831311142992859, + "mse,all": 2.4420179738562093, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.20305980528511822, + "acc,exam_id__USP_2023": 0.11363636363636363, + "acc,exam_id__UNICAMP_2021_1": 0.2608695652173913, + "acc,exam_id__UNICAMP_2024": 0.2222222222222222, + "acc,exam_id__USP_2020": 0.21428571428571427, + "acc,exam_id__UNICAMP_2018": 0.24074074074074073, + "acc,exam_id__USP_2019": 0.275, + "acc,exam_id__USP_2021": 0.21153846153846154, + "acc,exam_id__USP_2024": 0.12195121951219512, + "acc,exam_id__UNICAMP_2019": 0.18, + "acc,exam_id__UNICAMP_2020": 0.2, + "acc,exam_id__USP_2022": 0.20408163265306123, + "acc,exam_id__UNICAMP_2023": 0.3023255813953488, + "acc,exam_id__UNICAMP_2022": 0.28205128205128205, + "acc,exam_id__USP_2018": 0.12962962962962962, + "acc,exam_id__UNICAMP_2021_2": 0.11764705882352941, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.2099370188943317, + "acc,exam_id__2017": 0.22413793103448276, + "acc,exam_id__2014": 0.2018348623853211, + "acc,exam_id__2016": 0.2066115702479339, + "acc,exam_id__2013": 0.17592592592592593, + "acc,exam_id__2022": 0.24060150375939848, + "acc,exam_id__2023": 0.2962962962962963, + "acc,exam_id__2015": 0.15966386554621848, + "acc,exam_id__2010": 0.1794871794871795, + "acc,exam_id__2016_2": 0.21951219512195122, + "acc,exam_id__2009": 0.16521739130434782, + "acc,exam_id__2012": 0.21551724137931033, + "acc,exam_id__2011": 0.21367521367521367 + }, + "faquad_nli": { + "f1_macro,all": 0.4396551724137931, + "acc,all": 0.7846153846153846, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.4675345929636195, + "acc,all": 0.5285714285714286 + }, + "oab_exams": { + "acc,all": 0.2296127562642369, + "acc,exam_id__2018-25": 0.2875, + "acc,exam_id__2015-16": 0.2375, + "acc,exam_id__2012-06a": 0.2375, + "acc,exam_id__2016-21": 0.2125, + "acc,exam_id__2013-11": 0.1625, + "acc,exam_id__2012-07": 0.1375, + "acc,exam_id__2016-20a": 0.3, + "acc,exam_id__2015-17": 0.24358974358974358, + "acc,exam_id__2015-18": 0.25, + "acc,exam_id__2014-13": 0.2375, + "acc,exam_id__2011-05": 0.2375, + "acc,exam_id__2017-22": 0.25, + "acc,exam_id__2012-09": 0.23376623376623376, + "acc,exam_id__2013-12": 0.1625, + "acc,exam_id__2016-20": 0.225, + "acc,exam_id__2010-02": 0.25, + "acc,exam_id__2014-14": 0.2625, + 
"acc,exam_id__2012-06": 0.2375, + "acc,exam_id__2017-23": 0.2, + "acc,exam_id__2013-10": 0.2125, + "acc,exam_id__2017-24": 0.225, + "acc,exam_id__2010-01": 0.25882352941176473, + "acc,exam_id__2011-03": 0.24242424242424243, + "acc,exam_id__2011-04": 0.25, + "acc,exam_id__2014-15": 0.21794871794871795, + "acc,exam_id__2016-19": 0.19230769230769232, + "acc,exam_id__2012-08": 0.225, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.5141239707227814, + "acc,all": 0.5534665099882491 + }, + "tweetsentbr": { + "f1_macro,all": 0.2464771462735259, + "acc,all": 0.2855721393034826, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 42, - "non_truncated": 14108, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 56, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "6e46eae61c27280517feef46f536d16b91076f08", - "model_dtype": "torch.float16", - "model_memory_footprint": 2750648320, - "model_num_parameters": 1372178432, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 64, - "max_length": 2048, - "max_ctx_length": 2016, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1378.0061274509803, - "min_seq_length": 1355, - "max_seq_length": 1444, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1514.0061274509803, - "min_seq_length": 1491, - "max_seq_length": 1580, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 26, - "non_truncated": 693, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 39, - "mean_seq_length": 1656.076495132128, - "min_seq_length": 1285, - "max_seq_length": 2440, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.945757997218359 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 16, - "non_truncated": 1413, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 17, - "mean_seq_length": 
1559.0517844646606, - "min_seq_length": 1308, - "max_seq_length": 2520, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.988103568929321 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1578.8153846153846, - "min_seq_length": 1525, - "max_seq_length": 1688, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 42, + "non_truncated": 14108, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 56, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "6e46eae61c27280517feef46f536d16b91076f08", + "model_dtype": "torch.float16", + "model_memory_footprint": 2750648320, + "model_num_parameters": 1372178432, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 64, + "max_length": 2048, + "max_ctx_length": 2016, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1292.5114285714285, - "min_seq_length": 1269, - "max_seq_length": 1535, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1340.5503416856493, - "min_seq_length": 1077, - "max_seq_length": 1805, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1378.0061274509803, + "min_seq_length": 1355, + "max_seq_length": 1444, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1514.0061274509803, + "min_seq_length": 1491, + "max_seq_length": 1580, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 26, + "non_truncated": 693, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 39, + "mean_seq_length": 1656.076495132128, + "min_seq_length": 1285, + "max_seq_length": 2440, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.945757997218359 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 16, + "non_truncated": 1413, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 17, + "mean_seq_length": 1559.0517844646606, + "min_seq_length": 1308, + "max_seq_length": 2520, + "max_ctx_length": 
2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.988103568929321 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1578.8153846153846, + "min_seq_length": 1525, + "max_seq_length": 1688, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1292.5114285714285, + "min_seq_length": 1269, + "max_seq_length": 1535, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1340.5503416856493, + "min_seq_length": 1077, + "max_seq_length": 1805, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1775.5558166862515, + "min_seq_length": 1741, + "max_seq_length": 1812, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1634.2800995024875, + "min_seq_length": 1614, + "max_seq_length": 1693, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1775.5558166862515, - "min_seq_length": 1741, - "max_seq_length": 1812, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=state-spaces/mamba-1.4b-hf,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1634.2800995024875, - "min_seq_length": 1614, - "max_seq_length": 1693, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=state-spaces/mamba-1.4b-hf,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - 
}, - "git_hash": "0e4d6ae" + "git_hash": "0e4d6ae" } \ No newline at end of file diff --git a/state-spaces/mamba-1.4b-hf/results_2024-04-18T23-42-19.165668.json b/state-spaces/mamba-1.4b-hf/results_2024-04-18T23-42-19.165668.json index 4c15e6171348e65f8a05c6d05650358388a0148d..b4254a4616d42b97dfdb4c048dce3ac83fa8f8e9 100644 --- a/state-spaces/mamba-1.4b-hf/results_2024-04-18T23-42-19.165668.json +++ b/state-spaces/mamba-1.4b-hf/results_2024-04-18T23-42-19.165668.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.27719118102219953, - "all_grouped_npm": -0.09136901850378981, + "all_grouped_average": 0.2962328095674877, + "all_grouped_npm": -0.05482078713087582, "all_grouped": { "enem_challenge": 0.2099370188943317, "bluex": 0.20305980528511822, @@ -44,7 +44,7 @@ "assin2_sts": 0.012831311142992859, "faquad_nli": 0.4396551724137931, "hatebr_offensive": 0.4675345929636195, - "portuguese_hate_speech": 0.3427493138151876, + "portuguese_hate_speech": 0.5141239707227814, "tweetsentbr": 0.2464771462735259 }, "all": { @@ -55,7 +55,7 @@ "harness|assin2_sts|assin2_sts|None|15": 0.012831311142992859, "harness|faquad_nli|faquad_nli|None|15": 0.4396551724137931, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.4675345929636195, - "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.3427493138151876, + "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.5141239707227814, "harness|tweetsentbr|tweetsentbr|None|25": 0.2464771462735259 }, "harness|enem_challenge|enem_challenge|None|3": { @@ -145,9 +145,9 @@ "main_score": 0.4675345929636195 }, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": { - "f1_macro,all": 0.3427493138151876, + "f1_macro,all": 0.5141239707227814, "acc,all": 0.5534665099882491, - "main_score": 0.3427493138151876 + "main_score": 0.5141239707227814 }, "harness|tweetsentbr|tweetsentbr|None|25": { "f1_macro,all": 0.2464771462735259, diff --git a/teknium/OpenHermes-2-Mistral-7B/raw_2024-02-27T00-53-05.335169/results.json b/teknium/OpenHermes-2-Mistral-7B/raw_2024-02-27T00-53-05.335169/results.json index eda537f9723305289589a3318634999e20139e97..36cfdf7722fe87411168bc192e96f45160739c97 100644 --- a/teknium/OpenHermes-2-Mistral-7B/raw_2024-02-27T00-53-05.335169/results.json +++ b/teknium/OpenHermes-2-Mistral-7B/raw_2024-02-27T00-53-05.335169/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.8986873329127992, - "acc,all": 0.8986928104575164, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.6891008566875226, - "mse,all": 0.7112785947712418, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5424200278164116, - "acc,exam_id__USP_2023": 0.6590909090909091, - "acc,exam_id__UNICAMP_2023": 0.5348837209302325, - "acc,exam_id__UNICAMP_2024": 0.6222222222222222, - "acc,exam_id__USP_2021": 0.5, - "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, - "acc,exam_id__UNICAMP_2019": 0.6, - "acc,exam_id__UNICAMP_2022": 0.5641025641025641, - "acc,exam_id__UNICAMP_2018": 0.4444444444444444, - "acc,exam_id__UNICAMP_2020": 0.5818181818181818, - "acc,exam_id__USP_2020": 0.5, - "acc,exam_id__USP_2018": 0.48148148148148145, - "acc,exam_id__USP_2019": 0.425, - "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, - "acc,exam_id__USP_2024": 0.7073170731707317, - "acc,exam_id__USP_2022": 0.42857142857142855, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6270118964310707, - "acc,exam_id__2016_2": 0.6341463414634146, - "acc,exam_id__2023": 
0.6888888888888889, - "acc,exam_id__2014": 0.6330275229357798, - "acc,exam_id__2017": 0.6293103448275862, - "acc,exam_id__2009": 0.6086956521739131, - "acc,exam_id__2015": 0.5966386554621849, - "acc,exam_id__2016": 0.6528925619834711, - "acc,exam_id__2022": 0.6015037593984962, - "acc,exam_id__2012": 0.5775862068965517, - "acc,exam_id__2013": 0.6018518518518519, - "acc,exam_id__2011": 0.7008547008547008, - "acc,exam_id__2010": 0.5897435897435898 - }, - "faquad_nli": { - "f1_macro,all": 0.6618340732519423, - "acc,all": 0.8153846153846154, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7393515952707694, - "acc,all": 0.7535714285714286 - }, - "oab_exams": { - "acc,all": 0.42596810933940776, - "acc,exam_id__2012-08": 0.5, - "acc,exam_id__2015-17": 0.5256410256410257, - "acc,exam_id__2012-09": 0.3246753246753247, - "acc,exam_id__2013-11": 0.4625, - "acc,exam_id__2014-13": 0.375, - "acc,exam_id__2012-06": 0.4625, - "acc,exam_id__2017-24": 0.375, - "acc,exam_id__2010-01": 0.35294117647058826, - "acc,exam_id__2016-20a": 0.4125, - "acc,exam_id__2012-06a": 0.4375, - "acc,exam_id__2017-23": 0.425, - "acc,exam_id__2014-14": 0.5375, - "acc,exam_id__2018-25": 0.3625, - "acc,exam_id__2013-10": 0.45, - "acc,exam_id__2011-05": 0.4375, - "acc,exam_id__2017-22": 0.525, - "acc,exam_id__2011-03": 0.31313131313131315, - "acc,exam_id__2016-21": 0.4125, - "acc,exam_id__2015-16": 0.3625, - "acc,exam_id__2011-04": 0.4, - "acc,exam_id__2016-20": 0.4125, - "acc,exam_id__2014-15": 0.5, - "acc,exam_id__2012-07": 0.35, - "acc,exam_id__2016-19": 0.46153846153846156, - "acc,exam_id__2015-18": 0.4, - "acc,exam_id__2013-12": 0.4875, - "acc,exam_id__2010-02": 0.46, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6815454306722689, - "acc,all": 0.7320799059929495 - }, - "tweetsentbr": { - "f1_macro,all": 0.4725714806310689, - "acc,all": 0.6930348258706468, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.8986873329127992, + "acc,all": 0.8986928104575164, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.6891008566875226, + "mse,all": 0.7112785947712418, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5424200278164116, + "acc,exam_id__USP_2023": 0.6590909090909091, + "acc,exam_id__UNICAMP_2023": 0.5348837209302325, + "acc,exam_id__UNICAMP_2024": 0.6222222222222222, + "acc,exam_id__USP_2021": 0.5, + "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, + "acc,exam_id__UNICAMP_2019": 0.6, + "acc,exam_id__UNICAMP_2022": 0.5641025641025641, + "acc,exam_id__UNICAMP_2018": 0.4444444444444444, + "acc,exam_id__UNICAMP_2020": 0.5818181818181818, + "acc,exam_id__USP_2020": 0.5, + "acc,exam_id__USP_2018": 0.48148148148148145, + "acc,exam_id__USP_2019": 0.425, + "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, + "acc,exam_id__USP_2024": 0.7073170731707317, + "acc,exam_id__USP_2022": 0.42857142857142855, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6270118964310707, + "acc,exam_id__2016_2": 0.6341463414634146, + "acc,exam_id__2023": 0.6888888888888889, + "acc,exam_id__2014": 0.6330275229357798, + "acc,exam_id__2017": 0.6293103448275862, + "acc,exam_id__2009": 0.6086956521739131, + "acc,exam_id__2015": 0.5966386554621849, + "acc,exam_id__2016": 0.6528925619834711, + "acc,exam_id__2022": 0.6015037593984962, + "acc,exam_id__2012": 0.5775862068965517, + "acc,exam_id__2013": 0.6018518518518519, + "acc,exam_id__2011": 0.7008547008547008, + "acc,exam_id__2010": 0.5897435897435898 + }, + "faquad_nli": { + "f1_macro,all": 0.6618340732519423, + "acc,all": 0.8153846153846154, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7393515952707694, + "acc,all": 0.7535714285714286 + }, + "oab_exams": { + "acc,all": 0.42596810933940776, + "acc,exam_id__2012-08": 0.5, + "acc,exam_id__2015-17": 0.5256410256410257, + "acc,exam_id__2012-09": 0.3246753246753247, + "acc,exam_id__2013-11": 0.4625, + "acc,exam_id__2014-13": 0.375, + "acc,exam_id__2012-06": 0.4625, + "acc,exam_id__2017-24": 0.375, + "acc,exam_id__2010-01": 0.35294117647058826, + "acc,exam_id__2016-20a": 0.4125, + "acc,exam_id__2012-06a": 0.4375, + "acc,exam_id__2017-23": 0.425, + "acc,exam_id__2014-14": 0.5375, + "acc,exam_id__2018-25": 0.3625, + "acc,exam_id__2013-10": 0.45, + "acc,exam_id__2011-05": 0.4375, + "acc,exam_id__2017-22": 0.525, + "acc,exam_id__2011-03": 0.31313131313131315, + "acc,exam_id__2016-21": 0.4125, + "acc,exam_id__2015-16": 0.3625, + "acc,exam_id__2011-04": 0.4, + "acc,exam_id__2016-20": 0.4125, + "acc,exam_id__2014-15": 0.5, + 
"acc,exam_id__2012-07": 0.35, + "acc,exam_id__2016-19": 0.46153846153846156, + "acc,exam_id__2015-18": 0.4, + "acc,exam_id__2013-12": 0.4875, + "acc,exam_id__2010-02": 0.46, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6815454306722689, + "acc,all": 0.7320799059929495 + }, + "tweetsentbr": { + "f1_macro,all": 0.6300953075080918, + "acc,all": 0.6930348258706468, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"4c6e34123b140ce773a8433cae5410949289102c", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 15020376064, - "model_num_parameters": 7241748480, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 16, - "max_length": 4096, - "max_ctx_length": 4064, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1531.7455065359477, - "min_seq_length": 1508, - "max_seq_length": 1598, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1740.7455065359477, - "min_seq_length": 1717, - "max_seq_length": 1807, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1761.9262865090404, - "min_seq_length": 1385, - "max_seq_length": 2562, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1662.039188243527, - "min_seq_length": 1396, - "max_seq_length": 2660, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1756.9876923076922, - "min_seq_length": 1701, - "max_seq_length": 1877, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1567.3878571428572, - "min_seq_length": 1544, - "max_seq_length": 1818, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "4c6e34123b140ce773a8433cae5410949289102c", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 15020376064, + "model_num_parameters": 7241748480, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": 
null, + "model_device": "cuda:0", + "batch_size": 16, + "max_length": 4096, + "max_ctx_length": 4064, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1407.764464692483, - "min_seq_length": 1141, - "max_seq_length": 1910, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1531.7455065359477, + "min_seq_length": 1508, + "max_seq_length": 1598, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1740.7455065359477, + "min_seq_length": 1717, + "max_seq_length": 1807, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1761.9262865090404, + "min_seq_length": 1385, + "max_seq_length": 2562, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1662.039188243527, + "min_seq_length": 1396, + "max_seq_length": 2660, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1756.9876923076922, + "min_seq_length": 1701, + "max_seq_length": 1877, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1567.3878571428572, + "min_seq_length": 1544, + "max_seq_length": 1818, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1407.764464692483, + "min_seq_length": 1141, + "max_seq_length": 1910, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2068.3360752056406, + "min_seq_length": 2033, + "max_seq_length": 2107, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + 
"non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1814.2492537313433, + "min_seq_length": 1793, + "max_seq_length": 1909, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2068.3360752056406, - "min_seq_length": 2033, - "max_seq_length": 2107, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=teknium/OpenHermes-2-Mistral-7B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1814.2492537313433, - "min_seq_length": 1793, - "max_seq_length": 1909, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=teknium/OpenHermes-2-Mistral-7B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "804df15" + "git_hash": "804df15" } \ No newline at end of file diff --git a/teknium/OpenHermes-2-Mistral-7B/results_2024-02-27T00-53-05.335169.json b/teknium/OpenHermes-2-Mistral-7B/results_2024-02-27T00-53-05.335169.json index c81f40c4686e8b34003758d3c4c9a3ef94f90146..da7366ef3f4fecb013d563493be78628833da4f2 100644 --- a/teknium/OpenHermes-2-Mistral-7B/results_2024-02-27T00-53-05.335169.json +++ b/teknium/OpenHermes-2-Mistral-7B/results_2024-02-27T00-53-05.335169.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6376100892236958, - "all_grouped_npm": 0.45837915240917326, + "all_grouped_average": 0.6551127366544761, + "all_grouped_npm": 0.4844247587049773, "all_grouped": { "enem_challenge": 0.6270118964310707, "bluex": 0.5424200278164116, @@ -45,7 +45,7 @@ "faquad_nli": 0.6618340732519423, "hatebr_offensive": 0.7393515952707694, "portuguese_hate_speech": 0.6815454306722689, - "tweetsentbr": 0.4725714806310689 + "tweetsentbr": 0.6300953075080918 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6270118964310707, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.6618340732519423, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7393515952707694, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6815454306722689, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4725714806310689 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6300953075080918 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6270118964310707, @@ -150,9 +150,9 @@ "main_score": 0.6815454306722689 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 
0.4725714806310689, + "f1_macro,all": 0.6300953075080918, "acc,all": 0.6930348258706468, - "main_score": 0.4725714806310689 + "main_score": 0.6300953075080918 } }, "config_tasks": { diff --git a/teknium/OpenHermes-2.5-Mistral-7B/raw_2024-02-27T01-52-36.885316/results.json b/teknium/OpenHermes-2.5-Mistral-7B/raw_2024-02-27T01-52-36.885316/results.json index 6bba0d2e38d2f30e584b558cb79b7bab22a0504f..b1f78433a379c951765195a4479c482180d68bf3 100644 --- a/teknium/OpenHermes-2.5-Mistral-7B/raw_2024-02-27T01-52-36.885316/results.json +++ b/teknium/OpenHermes-2.5-Mistral-7B/raw_2024-02-27T01-52-36.885316/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9009851080543532, - "acc,all": 0.9011437908496732, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.6887378174660355, - "mse,all": 0.7285049019607842, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5285118219749653, - "acc,exam_id__USP_2023": 0.6136363636363636, - "acc,exam_id__UNICAMP_2023": 0.5348837209302325, - "acc,exam_id__UNICAMP_2024": 0.6222222222222222, - "acc,exam_id__USP_2021": 0.5192307692307693, - "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, - "acc,exam_id__UNICAMP_2019": 0.54, - "acc,exam_id__UNICAMP_2022": 0.6410256410256411, - "acc,exam_id__UNICAMP_2018": 0.4444444444444444, - "acc,exam_id__UNICAMP_2020": 0.6181818181818182, - "acc,exam_id__USP_2020": 0.5, - "acc,exam_id__USP_2018": 0.4444444444444444, - "acc,exam_id__USP_2019": 0.45, - "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, - "acc,exam_id__USP_2024": 0.6341463414634146, - "acc,exam_id__USP_2022": 0.42857142857142855, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6389083275017495, - "acc,exam_id__2016_2": 0.5691056910569106, - "acc,exam_id__2023": 0.6370370370370371, - "acc,exam_id__2014": 0.6330275229357798, - "acc,exam_id__2017": 0.6379310344827587, - "acc,exam_id__2009": 0.6086956521739131, - "acc,exam_id__2015": 0.5714285714285714, - "acc,exam_id__2016": 0.6446280991735537, - "acc,exam_id__2022": 0.6390977443609023, - "acc,exam_id__2012": 0.6551724137931034, - "acc,exam_id__2013": 0.6944444444444444, - "acc,exam_id__2011": 0.7264957264957265, - "acc,exam_id__2010": 0.6581196581196581 - }, - "faquad_nli": { - "f1_macro,all": 0.7119751855544478, - "acc,all": 0.8276923076923077, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8017530796462489, - "acc,all": 0.8064285714285714 - }, - "oab_exams": { - "acc,all": 0.43644646924829156, - "acc,exam_id__2012-08": 0.4125, - "acc,exam_id__2015-17": 0.5512820512820513, - "acc,exam_id__2012-09": 0.37662337662337664, - "acc,exam_id__2013-11": 0.425, - "acc,exam_id__2014-13": 0.3875, - "acc,exam_id__2012-06": 0.4625, - "acc,exam_id__2017-24": 0.375, - "acc,exam_id__2010-01": 0.35294117647058826, - "acc,exam_id__2016-20a": 0.5, - "acc,exam_id__2012-06a": 0.55, - "acc,exam_id__2017-23": 0.4125, - "acc,exam_id__2014-14": 0.4875, - "acc,exam_id__2018-25": 0.4125, - "acc,exam_id__2013-10": 0.425, - "acc,exam_id__2011-05": 0.4875, - "acc,exam_id__2017-22": 0.525, - "acc,exam_id__2011-03": 0.3434343434343434, - "acc,exam_id__2016-21": 0.45, - "acc,exam_id__2015-16": 0.375, - "acc,exam_id__2011-04": 0.3875, - "acc,exam_id__2016-20": 0.4375, - "acc,exam_id__2014-15": 0.48717948717948717, - "acc,exam_id__2012-07": 0.3625, - "acc,exam_id__2016-19": 0.5256410256410257, - "acc,exam_id__2015-18": 0.375, - "acc,exam_id__2013-12": 0.5125, - "acc,exam_id__2010-02": 0.42, - "alias": 
"oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.690165944889434, - "acc,all": 0.7320799059929495 - }, - "tweetsentbr": { - "f1_macro,all": 0.4384148442756514, - "acc,all": 0.6616915422885572, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9009851080543532, + "acc,all": 0.9011437908496732, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.6887378174660355, + "mse,all": 0.7285049019607842, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5285118219749653, + "acc,exam_id__USP_2023": 0.6136363636363636, + "acc,exam_id__UNICAMP_2023": 0.5348837209302325, + "acc,exam_id__UNICAMP_2024": 0.6222222222222222, + "acc,exam_id__USP_2021": 0.5192307692307693, + "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274, + "acc,exam_id__UNICAMP_2019": 0.54, + "acc,exam_id__UNICAMP_2022": 0.6410256410256411, + "acc,exam_id__UNICAMP_2018": 0.4444444444444444, + "acc,exam_id__UNICAMP_2020": 0.6181818181818182, + "acc,exam_id__USP_2020": 0.5, + "acc,exam_id__USP_2018": 0.4444444444444444, + "acc,exam_id__USP_2019": 0.45, + "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174, + "acc,exam_id__USP_2024": 0.6341463414634146, + "acc,exam_id__USP_2022": 0.42857142857142855, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6389083275017495, + "acc,exam_id__2016_2": 0.5691056910569106, + "acc,exam_id__2023": 0.6370370370370371, + "acc,exam_id__2014": 0.6330275229357798, + "acc,exam_id__2017": 0.6379310344827587, + "acc,exam_id__2009": 0.6086956521739131, + "acc,exam_id__2015": 0.5714285714285714, + "acc,exam_id__2016": 0.6446280991735537, + "acc,exam_id__2022": 0.6390977443609023, + "acc,exam_id__2012": 0.6551724137931034, + "acc,exam_id__2013": 0.6944444444444444, + "acc,exam_id__2011": 0.7264957264957265, + "acc,exam_id__2010": 0.6581196581196581 + }, + "faquad_nli": { + "f1_macro,all": 0.7119751855544478, + "acc,all": 0.8276923076923077, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8017530796462489, + "acc,all": 0.8064285714285714 + }, + "oab_exams": { + "acc,all": 0.43644646924829156, + "acc,exam_id__2012-08": 0.4125, + 
"acc,exam_id__2015-17": 0.5512820512820513, + "acc,exam_id__2012-09": 0.37662337662337664, + "acc,exam_id__2013-11": 0.425, + "acc,exam_id__2014-13": 0.3875, + "acc,exam_id__2012-06": 0.4625, + "acc,exam_id__2017-24": 0.375, + "acc,exam_id__2010-01": 0.35294117647058826, + "acc,exam_id__2016-20a": 0.5, + "acc,exam_id__2012-06a": 0.55, + "acc,exam_id__2017-23": 0.4125, + "acc,exam_id__2014-14": 0.4875, + "acc,exam_id__2018-25": 0.4125, + "acc,exam_id__2013-10": 0.425, + "acc,exam_id__2011-05": 0.4875, + "acc,exam_id__2017-22": 0.525, + "acc,exam_id__2011-03": 0.3434343434343434, + "acc,exam_id__2016-21": 0.45, + "acc,exam_id__2015-16": 0.375, + "acc,exam_id__2011-04": 0.3875, + "acc,exam_id__2016-20": 0.4375, + "acc,exam_id__2014-15": 0.48717948717948717, + "acc,exam_id__2012-07": 0.3625, + "acc,exam_id__2016-19": 0.5256410256410257, + "acc,exam_id__2015-18": 0.375, + "acc,exam_id__2013-12": 0.5125, + "acc,exam_id__2010-02": 0.42, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.690165944889434, + "acc,all": 0.7320799059929495 + }, + "tweetsentbr": { + "f1_macro,all": 0.5845531257008686, + "acc,all": 0.6616915422885572, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"24c0bea14d53e6f67f1fbe2eca5bfe7cae389b33", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 15020376064, - "model_num_parameters": 7241748480, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 4096, - "max_ctx_length": 4064, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1531.7455065359477, - "min_seq_length": 1508, - "max_seq_length": 1598, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1740.7455065359477, - "min_seq_length": 1717, - "max_seq_length": 1807, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1761.9262865090404, - "min_seq_length": 1385, - "max_seq_length": 2562, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1662.039188243527, - "min_seq_length": 1396, - "max_seq_length": 2660, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1756.9876923076922, - "min_seq_length": 1701, - "max_seq_length": 1877, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1567.3878571428572, - "min_seq_length": 1544, - "max_seq_length": 1818, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "24c0bea14d53e6f67f1fbe2eca5bfe7cae389b33", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 15020376064, + "model_num_parameters": 7241748480, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": 
null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 4096, + "max_ctx_length": 4064, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1407.764464692483, - "min_seq_length": 1141, - "max_seq_length": 1910, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1531.7455065359477, + "min_seq_length": 1508, + "max_seq_length": 1598, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1740.7455065359477, + "min_seq_length": 1717, + "max_seq_length": 1807, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1761.9262865090404, + "min_seq_length": 1385, + "max_seq_length": 2562, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1662.039188243527, + "min_seq_length": 1396, + "max_seq_length": 2660, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1756.9876923076922, + "min_seq_length": 1701, + "max_seq_length": 1877, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1567.3878571428572, + "min_seq_length": 1544, + "max_seq_length": 1818, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1407.764464692483, + "min_seq_length": 1141, + "max_seq_length": 1910, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2068.3360752056406, + "min_seq_length": 2033, + "max_seq_length": 2107, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + 
"non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1814.2492537313433, + "min_seq_length": 1793, + "max_seq_length": 1909, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2068.3360752056406, - "min_seq_length": 2033, - "max_seq_length": 2107, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=teknium/OpenHermes-2.5-Mistral-7B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1814.2492537313433, - "min_seq_length": 1793, - "max_seq_length": 1909, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=teknium/OpenHermes-2.5-Mistral-7B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "804df15" + "git_hash": "804df15" } \ No newline at end of file diff --git a/teknium/OpenHermes-2.5-Mistral-7B/results_2024-02-27T01-52-36.885316.json b/teknium/OpenHermes-2.5-Mistral-7B/results_2024-02-27T01-52-36.885316.json index 3092009222eba843358a1f3c762e475e985e7e32..bdadaa5fe19e5adcc7bf0d7c0a0daa35c368bfa0 100644 --- a/teknium/OpenHermes-2.5-Mistral-7B/results_2024-02-27T01-52-36.885316.json +++ b/teknium/OpenHermes-2.5-Mistral-7B/results_2024-02-27T01-52-36.885316.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6484331776234641, - "all_grouped_npm": 0.48035915464260714, + "all_grouped_average": 0.6646707644484883, + "all_grouped_npm": 0.5045222302750836, "all_grouped": { "enem_challenge": 0.6389083275017495, "bluex": 0.5285118219749653, @@ -45,7 +45,7 @@ "faquad_nli": 0.7119751855544478, "hatebr_offensive": 0.8017530796462489, "portuguese_hate_speech": 0.690165944889434, - "tweetsentbr": 0.4384148442756514 + "tweetsentbr": 0.5845531257008686 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6389083275017495, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7119751855544478, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8017530796462489, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.690165944889434, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4384148442756514 + "harness|tweetsentbr|tweetsentbr|None|25": 0.5845531257008686 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6389083275017495, @@ -150,9 +150,9 @@ "main_score": 0.690165944889434 }, "harness|tweetsentbr|tweetsentbr|None|25": { - 
"f1_macro,all": 0.4384148442756514, + "f1_macro,all": 0.5845531257008686, "acc,all": 0.6616915422885572, - "main_score": 0.4384148442756514 + "main_score": 0.5845531257008686 } }, "config_tasks": { diff --git a/upstage/SOLAR-10.7B-Instruct-v1.0/raw_2024-02-24T23-56-01.386628/results.json b/upstage/SOLAR-10.7B-Instruct-v1.0/raw_2024-02-24T23-56-01.386628/results.json index 4dafc8087917cf0f60370e1a1c860f96cd0d0eef..4132e2b1bcbbf8805f081baaf81576fffb474508 100644 --- a/upstage/SOLAR-10.7B-Instruct-v1.0/raw_2024-02-24T23-56-01.386628/results.json +++ b/upstage/SOLAR-10.7B-Instruct-v1.0/raw_2024-02-24T23-56-01.386628/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.927692591384275, - "acc,all": 0.9276960784313726, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.8166852379077367, - "mse,all": 0.4969703835784313, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5827538247566064, - "acc,exam_id__USP_2019": 0.525, - "acc,exam_id__USP_2022": 0.5510204081632653, - "acc,exam_id__USP_2023": 0.6818181818181818, - "acc,exam_id__UNICAMP_2018": 0.5370370370370371, - "acc,exam_id__UNICAMP_2019": 0.54, - "acc,exam_id__USP_2020": 0.4642857142857143, - "acc,exam_id__UNICAMP_2020": 0.6181818181818182, - "acc,exam_id__UNICAMP_2023": 0.7209302325581395, - "acc,exam_id__USP_2021": 0.5192307692307693, - "acc,exam_id__UNICAMP_2022": 0.5641025641025641, - "acc,exam_id__UNICAMP_2024": 0.5333333333333333, - "acc,exam_id__USP_2018": 0.5370370370370371, - "acc,exam_id__UNICAMP_2021_2": 0.6078431372549019, - "acc,exam_id__UNICAMP_2021_1": 0.6086956521739131, - "acc,exam_id__USP_2024": 0.8048780487804879, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6822953114065781, - "acc,exam_id__2013": 0.6944444444444444, - "acc,exam_id__2016_2": 0.6666666666666666, - "acc,exam_id__2016": 0.6776859504132231, - "acc,exam_id__2011": 0.7350427350427351, - "acc,exam_id__2017": 0.6724137931034483, - "acc,exam_id__2023": 0.6370370370370371, - "acc,exam_id__2014": 0.6788990825688074, - "acc,exam_id__2012": 0.7241379310344828, - "acc,exam_id__2009": 0.6347826086956522, - "acc,exam_id__2015": 0.7310924369747899, - "acc,exam_id__2022": 0.631578947368421, - "acc,exam_id__2010": 0.717948717948718 - }, - "faquad_nli": { - "f1_macro,all": 0.7543037140136254, - "acc,all": 0.8015384615384615, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8039482041435217, - "acc,all": 0.8092857142857143 - }, - "oab_exams": { - "acc,all": 0.46059225512528473, - "acc,exam_id__2016-20a": 0.475, - "acc,exam_id__2012-06": 0.4625, - "acc,exam_id__2015-18": 0.575, - "acc,exam_id__2014-14": 0.5625, - "acc,exam_id__2012-07": 0.3625, - "acc,exam_id__2015-16": 0.4625, - "acc,exam_id__2011-05": 0.4875, - "acc,exam_id__2012-06a": 0.4, - "acc,exam_id__2017-23": 0.5, - "acc,exam_id__2016-19": 0.44871794871794873, - "acc,exam_id__2017-24": 0.4875, - "acc,exam_id__2016-20": 0.425, - "acc,exam_id__2017-22": 0.55, - "acc,exam_id__2013-12": 0.475, - "acc,exam_id__2010-02": 0.43, - "acc,exam_id__2011-03": 0.42424242424242425, - "acc,exam_id__2012-08": 0.525, - "acc,exam_id__2013-10": 0.4875, - "acc,exam_id__2016-21": 0.4125, - "acc,exam_id__2014-15": 0.5384615384615384, - "acc,exam_id__2018-25": 0.425, - "acc,exam_id__2014-13": 0.425, - "acc,exam_id__2010-01": 0.27058823529411763, - "acc,exam_id__2015-17": 0.5512820512820513, - "acc,exam_id__2013-11": 0.5125, - "acc,exam_id__2011-04": 0.425, - "acc,exam_id__2012-09": 
0.36363636363636365, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6913146377432091, - "acc,all": 0.7132784958871915 - }, - "tweetsentbr": { - "f1_macro,all": 0.5322791024834643, - "acc,all": 0.7313432835820896, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.927692591384275, + "acc,all": 0.9276960784313726, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.8166852379077367, + "mse,all": 0.4969703835784313, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5827538247566064, + "acc,exam_id__USP_2019": 0.525, + "acc,exam_id__USP_2022": 0.5510204081632653, + "acc,exam_id__USP_2023": 0.6818181818181818, + "acc,exam_id__UNICAMP_2018": 0.5370370370370371, + "acc,exam_id__UNICAMP_2019": 0.54, + "acc,exam_id__USP_2020": 0.4642857142857143, + "acc,exam_id__UNICAMP_2020": 0.6181818181818182, + "acc,exam_id__UNICAMP_2023": 0.7209302325581395, + "acc,exam_id__USP_2021": 0.5192307692307693, + "acc,exam_id__UNICAMP_2022": 0.5641025641025641, + "acc,exam_id__UNICAMP_2024": 0.5333333333333333, + "acc,exam_id__USP_2018": 0.5370370370370371, + "acc,exam_id__UNICAMP_2021_2": 0.6078431372549019, + "acc,exam_id__UNICAMP_2021_1": 0.6086956521739131, + "acc,exam_id__USP_2024": 0.8048780487804879, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6822953114065781, + "acc,exam_id__2013": 0.6944444444444444, + "acc,exam_id__2016_2": 0.6666666666666666, + "acc,exam_id__2016": 0.6776859504132231, + "acc,exam_id__2011": 0.7350427350427351, + "acc,exam_id__2017": 0.6724137931034483, + "acc,exam_id__2023": 0.6370370370370371, + "acc,exam_id__2014": 0.6788990825688074, + "acc,exam_id__2012": 0.7241379310344828, + "acc,exam_id__2009": 0.6347826086956522, + "acc,exam_id__2015": 0.7310924369747899, + "acc,exam_id__2022": 0.631578947368421, + "acc,exam_id__2010": 0.717948717948718 + }, + "faquad_nli": { + "f1_macro,all": 0.7543037140136254, + "acc,all": 0.8015384615384615, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8039482041435217, + "acc,all": 0.8092857142857143 + }, + "oab_exams": { + "acc,all": 
0.46059225512528473, + "acc,exam_id__2016-20a": 0.475, + "acc,exam_id__2012-06": 0.4625, + "acc,exam_id__2015-18": 0.575, + "acc,exam_id__2014-14": 0.5625, + "acc,exam_id__2012-07": 0.3625, + "acc,exam_id__2015-16": 0.4625, + "acc,exam_id__2011-05": 0.4875, + "acc,exam_id__2012-06a": 0.4, + "acc,exam_id__2017-23": 0.5, + "acc,exam_id__2016-19": 0.44871794871794873, + "acc,exam_id__2017-24": 0.4875, + "acc,exam_id__2016-20": 0.425, + "acc,exam_id__2017-22": 0.55, + "acc,exam_id__2013-12": 0.475, + "acc,exam_id__2010-02": 0.43, + "acc,exam_id__2011-03": 0.42424242424242425, + "acc,exam_id__2012-08": 0.525, + "acc,exam_id__2013-10": 0.4875, + "acc,exam_id__2016-21": 0.4125, + "acc,exam_id__2014-15": 0.5384615384615384, + "acc,exam_id__2018-25": 0.425, + "acc,exam_id__2014-13": 0.425, + "acc,exam_id__2010-01": 0.27058823529411763, + "acc,exam_id__2015-17": 0.5512820512820513, + "acc,exam_id__2013-11": 0.5125, + "acc,exam_id__2011-04": 0.425, + "acc,exam_id__2012-09": 0.36363636363636365, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6913146377432091, + "acc,all": 0.7132784958871915 + }, + "tweetsentbr": { + "f1_macro,all": 0.7097054699779523, + "acc,all": 0.7313432835820896, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"cd8ae35fb366b089e13377a1cc03d656cc775314", - "model_dtype": "torch.float16", - "model_memory_footprint": 21597278208, - "model_num_parameters": 10731524096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 4096, - "max_ctx_length": 4064, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1502.7455065359477, - "min_seq_length": 1479, - "max_seq_length": 1569, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1711.7455065359477, - "min_seq_length": 1688, - "max_seq_length": 1778, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1756.9262865090404, - "min_seq_length": 1380, - "max_seq_length": 2557, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 1657.039188243527, - "min_seq_length": 1391, - "max_seq_length": 2655, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1727.9876923076922, - "min_seq_length": 1672, - "max_seq_length": 1848, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1518.3878571428572, - "min_seq_length": 1495, - "max_seq_length": 1769, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "cd8ae35fb366b089e13377a1cc03d656cc775314", + "model_dtype": "torch.float16", + "model_memory_footprint": 21597278208, + "model_num_parameters": 10731524096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": 
null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 4096, + "max_ctx_length": 4064, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1402.764464692483, - "min_seq_length": 1136, - "max_seq_length": 1905, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1502.7455065359477, + "min_seq_length": 1479, + "max_seq_length": 1569, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1711.7455065359477, + "min_seq_length": 1688, + "max_seq_length": 1778, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1756.9262865090404, + "min_seq_length": 1380, + "max_seq_length": 2557, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1657.039188243527, + "min_seq_length": 1391, + "max_seq_length": 2655, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1727.9876923076922, + "min_seq_length": 1672, + "max_seq_length": 1848, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1518.3878571428572, + "min_seq_length": 1495, + "max_seq_length": 1769, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1402.764464692483, + "min_seq_length": 1136, + "max_seq_length": 1905, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2019.3360752056403, + "min_seq_length": 1984, + "max_seq_length": 2058, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + 
"non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1765.2492537313433, + "min_seq_length": 1744, + "max_seq_length": 1860, + "max_ctx_length": 4064, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2019.3360752056403, - "min_seq_length": 1984, - "max_seq_length": 2058, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=upstage/SOLAR-10.7B-Instruct-v1.0,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1765.2492537313433, - "min_seq_length": 1744, - "max_seq_length": 1860, - "max_ctx_length": 4064, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=upstage/SOLAR-10.7B-Instruct-v1.0,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "804df15" + "git_hash": "804df15" } \ No newline at end of file diff --git a/upstage/SOLAR-10.7B-Instruct-v1.0/results_2024-02-24T23-56-01.386628.json b/upstage/SOLAR-10.7B-Instruct-v1.0/results_2024-02-24T23-56-01.386628.json index dddb7f22853eeec5e279247965d05ddc4304264f..5a6c0e16bc3d3cc7842085b6be6e7e080c84931e 100644 --- a/upstage/SOLAR-10.7B-Instruct-v1.0/results_2024-02-24T23-56-01.386628.json +++ b/upstage/SOLAR-10.7B-Instruct-v1.0/results_2024-02-24T23-56-01.386628.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6946516532182557, - "all_grouped_npm": 0.5427884839238583, + "all_grouped_average": 0.7143656940509766, + "all_grouped_npm": 0.5721248542106453, "all_grouped": { "enem_challenge": 0.6822953114065781, "bluex": 0.5827538247566064, @@ -45,7 +45,7 @@ "faquad_nli": 0.7543037140136254, "hatebr_offensive": 0.8039482041435217, "portuguese_hate_speech": 0.6913146377432091, - "tweetsentbr": 0.5322791024834643 + "tweetsentbr": 0.7097054699779523 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6822953114065781, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7543037140136254, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8039482041435217, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6913146377432091, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5322791024834643 + "harness|tweetsentbr|tweetsentbr|None|25": 0.7097054699779523 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6822953114065781, @@ -150,9 +150,9 @@ "main_score": 0.6913146377432091 }, "harness|tweetsentbr|tweetsentbr|None|25": { - 
"f1_macro,all": 0.5322791024834643, + "f1_macro,all": 0.7097054699779523, "acc,all": 0.7313432835820896, - "main_score": 0.5322791024834643 + "main_score": 0.7097054699779523 } }, "config_tasks": { diff --git a/uygarkurt/llama-3-merged-linear/raw_2024-05-18T12-22-49.038176/results.json b/uygarkurt/llama-3-merged-linear/raw_2024-05-18T12-22-49.038176/results.json index b05ac5c2ec8013afc5890c93d48f73ccc155bb11..82b771be72a84ec9e3ace741f4db8d6020dcc439 100644 --- a/uygarkurt/llama-3-merged-linear/raw_2024-05-18T12-22-49.038176/results.json +++ b/uygarkurt/llama-3-merged-linear/raw_2024-05-18T12-22-49.038176/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9170332834196958, - "acc,all": 0.9170751633986928, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.72913821581632, - "mse,all": 0.6943586601307189, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.588317107093185, - "acc,exam_id__UNICAMP_2024": 0.6666666666666666, - "acc,exam_id__USP_2019": 0.65, - "acc,exam_id__UNICAMP_2019": 0.62, - "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, - "acc,exam_id__USP_2021": 0.5961538461538461, - "acc,exam_id__UNICAMP_2020": 0.5454545454545454, - "acc,exam_id__UNICAMP_2022": 0.717948717948718, - "acc,exam_id__USP_2018": 0.4444444444444444, - "acc,exam_id__USP_2022": 0.5306122448979592, - "acc,exam_id__UNICAMP_2018": 0.4444444444444444, - "acc,exam_id__UNICAMP_2023": 0.627906976744186, - "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, - "acc,exam_id__USP_2020": 0.5714285714285714, - "acc,exam_id__USP_2024": 0.7073170731707317, - "acc,exam_id__USP_2023": 0.6818181818181818, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.7095871238628412, - "acc,exam_id__2012": 0.7155172413793104, - "acc,exam_id__2017": 0.6982758620689655, - "acc,exam_id__2013": 0.6851851851851852, - "acc,exam_id__2016": 0.6942148760330579, - "acc,exam_id__2011": 0.7350427350427351, - "acc,exam_id__2015": 0.7058823529411765, - "acc,exam_id__2022": 0.6691729323308271, - "acc,exam_id__2014": 0.7247706422018348, - "acc,exam_id__2010": 0.717948717948718, - "acc,exam_id__2009": 0.7478260869565218, - "acc,exam_id__2016_2": 0.6585365853658537, - "acc,exam_id__2023": 0.762962962962963 - }, - "faquad_nli": { - "f1_macro,all": 0.7494336310476937, - "acc,all": 0.8, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8685110509928028, - "acc,all": 0.8685714285714285 - }, - "oab_exams": { - "acc,all": 0.5148063781321185, - "acc,exam_id__2014-15": 0.5769230769230769, - "acc,exam_id__2012-07": 0.4875, - "acc,exam_id__2016-20a": 0.4625, - "acc,exam_id__2015-16": 0.5, - "acc,exam_id__2016-21": 0.3875, - "acc,exam_id__2013-10": 0.4625, - "acc,exam_id__2014-13": 0.425, - "acc,exam_id__2010-02": 0.56, - "acc,exam_id__2012-06": 0.5375, - "acc,exam_id__2018-25": 0.525, - "acc,exam_id__2011-04": 0.5, - "acc,exam_id__2012-08": 0.5125, - "acc,exam_id__2015-18": 0.5375, - "acc,exam_id__2011-05": 0.45, - "acc,exam_id__2012-09": 0.5454545454545454, - "acc,exam_id__2017-24": 0.4875, - "acc,exam_id__2012-06a": 0.5625, - "acc,exam_id__2016-20": 0.5625, - "acc,exam_id__2013-12": 0.55, - "acc,exam_id__2016-19": 0.5512820512820513, - "acc,exam_id__2014-14": 0.6125, - "acc,exam_id__2017-22": 0.575, - "acc,exam_id__2010-01": 0.3764705882352941, - "acc,exam_id__2011-03": 0.48484848484848486, - "acc,exam_id__2015-17": 0.6410256410256411, - "acc,exam_id__2017-23": 0.5, - "acc,exam_id__2013-11": 0.5375, - "alias": 
"oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6047540154030653, - "acc,all": 0.6063454759106933 - }, - "tweetsentbr": { - "f1_macro,all": 0.5038642629662892, - "acc,all": 0.7238805970149254, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9170332834196958, + "acc,all": 0.9170751633986928, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.72913821581632, + "mse,all": 0.6943586601307189, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.588317107093185, + "acc,exam_id__UNICAMP_2024": 0.6666666666666666, + "acc,exam_id__USP_2019": 0.65, + "acc,exam_id__UNICAMP_2019": 0.62, + "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, + "acc,exam_id__USP_2021": 0.5961538461538461, + "acc,exam_id__UNICAMP_2020": 0.5454545454545454, + "acc,exam_id__UNICAMP_2022": 0.717948717948718, + "acc,exam_id__USP_2018": 0.4444444444444444, + "acc,exam_id__USP_2022": 0.5306122448979592, + "acc,exam_id__UNICAMP_2018": 0.4444444444444444, + "acc,exam_id__UNICAMP_2023": 0.627906976744186, + "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, + "acc,exam_id__USP_2020": 0.5714285714285714, + "acc,exam_id__USP_2024": 0.7073170731707317, + "acc,exam_id__USP_2023": 0.6818181818181818, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.7095871238628412, + "acc,exam_id__2012": 0.7155172413793104, + "acc,exam_id__2017": 0.6982758620689655, + "acc,exam_id__2013": 0.6851851851851852, + "acc,exam_id__2016": 0.6942148760330579, + "acc,exam_id__2011": 0.7350427350427351, + "acc,exam_id__2015": 0.7058823529411765, + "acc,exam_id__2022": 0.6691729323308271, + "acc,exam_id__2014": 0.7247706422018348, + "acc,exam_id__2010": 0.717948717948718, + "acc,exam_id__2009": 0.7478260869565218, + "acc,exam_id__2016_2": 0.6585365853658537, + "acc,exam_id__2023": 0.762962962962963 + }, + "faquad_nli": { + "f1_macro,all": 0.7494336310476937, + "acc,all": 0.8, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8685110509928028, + "acc,all": 0.8685714285714285 + }, + "oab_exams": { + "acc,all": 0.5148063781321185, + "acc,exam_id__2014-15": 0.5769230769230769, + 
"acc,exam_id__2012-07": 0.4875, + "acc,exam_id__2016-20a": 0.4625, + "acc,exam_id__2015-16": 0.5, + "acc,exam_id__2016-21": 0.3875, + "acc,exam_id__2013-10": 0.4625, + "acc,exam_id__2014-13": 0.425, + "acc,exam_id__2010-02": 0.56, + "acc,exam_id__2012-06": 0.5375, + "acc,exam_id__2018-25": 0.525, + "acc,exam_id__2011-04": 0.5, + "acc,exam_id__2012-08": 0.5125, + "acc,exam_id__2015-18": 0.5375, + "acc,exam_id__2011-05": 0.45, + "acc,exam_id__2012-09": 0.5454545454545454, + "acc,exam_id__2017-24": 0.4875, + "acc,exam_id__2012-06a": 0.5625, + "acc,exam_id__2016-20": 0.5625, + "acc,exam_id__2013-12": 0.55, + "acc,exam_id__2016-19": 0.5512820512820513, + "acc,exam_id__2014-14": 0.6125, + "acc,exam_id__2017-22": 0.575, + "acc,exam_id__2010-01": 0.3764705882352941, + "acc,exam_id__2011-03": 0.48484848484848486, + "acc,exam_id__2015-17": 0.6410256410256411, + "acc,exam_id__2017-23": 0.5, + "acc,exam_id__2013-11": 0.5375, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6047540154030653, + "acc,all": 0.6063454759106933 + }, + "tweetsentbr": { + "f1_macro,all": 0.6718190172883856, + "acc,all": 0.7238805970149254, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 0, - "non_truncated": 14150, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 0, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "3a053dd8d2e9117544bf9f608beafb1aabe7b09c", - "model_dtype": "torch.float16", - "model_memory_footprint": 16194748416, - "model_num_parameters": 8030261248, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1318.5322712418301, - "min_seq_length": 1299, - "max_seq_length": 1382, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1509.5322712418301, - "min_seq_length": 1490, - "max_seq_length": 1573, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1484.7719054242002, - "min_seq_length": 1165, - "max_seq_length": 2134, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 0, - "non_truncated": 1429, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 0, - "mean_seq_length": 
1412.3547935619315, - "min_seq_length": 1187, - "max_seq_length": 2340, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1447.8215384615385, - "min_seq_length": 1402, - "max_seq_length": 1544, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 0, + "non_truncated": 14150, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 0, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "3a053dd8d2e9117544bf9f608beafb1aabe7b09c", + "model_dtype": "torch.float16", + "model_memory_footprint": 16194748416, + "model_num_parameters": 8030261248, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1279.3878571428572, - "min_seq_length": 1259, - "max_seq_length": 1498, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1220.3772209567198, - "min_seq_length": 988, - "max_seq_length": 1654, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1318.5322712418301, + "min_seq_length": 1299, + "max_seq_length": 1382, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1509.5322712418301, + "min_seq_length": 1490, + "max_seq_length": 1573, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1484.7719054242002, + "min_seq_length": 1165, + "max_seq_length": 2134, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 0, + "non_truncated": 1429, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 0, + "mean_seq_length": 1412.3547935619315, + "min_seq_length": 1187, + "max_seq_length": 2340, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1447.8215384615385, + "min_seq_length": 1402, + "max_seq_length": 1544, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1279.3878571428572, + "min_seq_length": 1259, + "max_seq_length": 1498, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1220.3772209567198, + "min_seq_length": 988, + "max_seq_length": 1654, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1676.4195064629848, + "min_seq_length": 1646, + "max_seq_length": 1708, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1537.1537313432837, + "min_seq_length": 1520, + "max_seq_length": 1585, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1676.4195064629848, - "min_seq_length": 1646, - "max_seq_length": 1708, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=uygarkurt/llama-3-merged-linear,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1537.1537313432837, - "min_seq_length": 1520, - "max_seq_length": 1585, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=uygarkurt/llama-3-merged-linear,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - 
"git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/uygarkurt/llama-3-merged-linear/results_2024-05-18T12-22-49.038176.json b/uygarkurt/llama-3-merged-linear/results_2024-05-18T12-22-49.038176.json index 82b25def341819185df4ca0560947ca3f23bb8a8..d2ae7e9088cd8dd1b15a4085e1058b82b5295464 100644 --- a/uygarkurt/llama-3-merged-linear/results_2024-05-18T12-22-49.038176.json +++ b/uygarkurt/llama-3-merged-linear/results_2024-05-18T12-22-49.038176.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.687271674303779, - "all_grouped_npm": 0.5335061697663437, + "all_grouped_average": 0.7059333136729009, + "all_grouped_npm": 0.5612764664465846, "all_grouped": { "enem_challenge": 0.7095871238628412, "bluex": 0.588317107093185, @@ -45,7 +45,7 @@ "faquad_nli": 0.7494336310476937, "hatebr_offensive": 0.8685110509928028, "portuguese_hate_speech": 0.6047540154030653, - "tweetsentbr": 0.5038642629662892 + "tweetsentbr": 0.6718190172883856 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.7095871238628412, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7494336310476937, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8685110509928028, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6047540154030653, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5038642629662892 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6718190172883856 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.7095871238628412, @@ -150,9 +150,9 @@ "main_score": 0.6047540154030653 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5038642629662892, + "f1_macro,all": 0.6718190172883856, "acc,all": 0.7238805970149254, - "main_score": 0.5038642629662892 + "main_score": 0.6718190172883856 } }, "config_tasks": { diff --git a/vicgalle/CarbonBeagle-11B-truthy/raw_2024-05-20T00-40-47.200785/results.json b/vicgalle/CarbonBeagle-11B-truthy/raw_2024-05-20T00-40-47.200785/results.json index 1dc367256a2a455c3e0b4fc3f46d9ac7afe8456b..184589bf2ac0e4dcbae849cdc516d74a7d243134 100644 --- a/vicgalle/CarbonBeagle-11B-truthy/raw_2024-05-20T00-40-47.200785/results.json +++ b/vicgalle/CarbonBeagle-11B-truthy/raw_2024-05-20T00-40-47.200785/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.924836149775025, - "acc,all": 0.9248366013071896, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.8120497093970859, - "mse,all": 0.41340784803921565, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.6008344923504868, - "acc,exam_id__USP_2019": 0.6, - "acc,exam_id__USP_2022": 0.5510204081632653, - "acc,exam_id__UNICAMP_2023": 0.6511627906976745, - "acc,exam_id__USP_2023": 0.7045454545454546, - "acc,exam_id__USP_2020": 0.5178571428571429, - "acc,exam_id__UNICAMP_2024": 0.6, - "acc,exam_id__USP_2021": 0.5961538461538461, - "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, - "acc,exam_id__UNICAMP_2021_2": 0.6274509803921569, - "acc,exam_id__UNICAMP_2022": 0.5641025641025641, - "acc,exam_id__USP_2018": 0.5185185185185185, - "acc,exam_id__USP_2024": 0.8292682926829268, - "acc,exam_id__UNICAMP_2019": 0.58, - "acc,exam_id__UNICAMP_2020": 0.6, - "acc,exam_id__UNICAMP_2018": 0.5740740740740741, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6934919524142757, - "acc,exam_id__2016": 0.6776859504132231, - "acc,exam_id__2013": 0.6574074074074074, - "acc,exam_id__2011": 0.7521367521367521, - "acc,exam_id__2009": 0.7043478260869566, - 
"acc,exam_id__2016_2": 0.6422764227642277, - "acc,exam_id__2022": 0.631578947368421, - "acc,exam_id__2014": 0.6788990825688074, - "acc,exam_id__2015": 0.6890756302521008, - "acc,exam_id__2017": 0.7327586206896551, - "acc,exam_id__2010": 0.717948717948718, - "acc,exam_id__2012": 0.7155172413793104, - "acc,exam_id__2023": 0.725925925925926 - }, - "faquad_nli": { - "f1_macro,all": 0.7755116879339805, - "acc,all": 0.8246153846153846, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8180089679918182, - "acc,all": 0.8221428571428572 - }, - "oab_exams": { - "acc,all": 0.46560364464692483, - "acc,exam_id__2011-05": 0.5, - "acc,exam_id__2016-21": 0.4625, - "acc,exam_id__2017-22": 0.5, - "acc,exam_id__2013-11": 0.4625, - "acc,exam_id__2018-25": 0.45, - "acc,exam_id__2012-09": 0.3116883116883117, - "acc,exam_id__2014-13": 0.4125, - "acc,exam_id__2015-17": 0.5897435897435898, - "acc,exam_id__2017-24": 0.475, - "acc,exam_id__2014-15": 0.5897435897435898, - "acc,exam_id__2017-23": 0.5, - "acc,exam_id__2011-03": 0.37373737373737376, - "acc,exam_id__2012-07": 0.4, - "acc,exam_id__2012-08": 0.5125, - "acc,exam_id__2012-06a": 0.4625, - "acc,exam_id__2010-01": 0.3058823529411765, - "acc,exam_id__2015-16": 0.45, - "acc,exam_id__2016-19": 0.47435897435897434, - "acc,exam_id__2015-18": 0.525, - "acc,exam_id__2012-06": 0.4, - "acc,exam_id__2014-14": 0.55, - "acc,exam_id__2013-10": 0.5, - "acc,exam_id__2013-12": 0.5375, - "acc,exam_id__2016-20": 0.525, - "acc,exam_id__2011-04": 0.5, - "acc,exam_id__2010-02": 0.46, - "acc,exam_id__2016-20a": 0.375, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7202839396628216, - "acc,all": 0.7497062279670975 - }, - "tweetsentbr": { - "f1_macro,all": 0.5304536659002577, - "acc,all": 0.736318407960199, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.924836149775025, + "acc,all": 0.9248366013071896, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.8120497093970859, + "mse,all": 0.41340784803921565, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.6008344923504868, + "acc,exam_id__USP_2019": 0.6, + "acc,exam_id__USP_2022": 0.5510204081632653, + "acc,exam_id__UNICAMP_2023": 0.6511627906976745, + "acc,exam_id__USP_2023": 0.7045454545454546, + "acc,exam_id__USP_2020": 0.5178571428571429, + "acc,exam_id__UNICAMP_2024": 0.6, + "acc,exam_id__USP_2021": 0.5961538461538461, + "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, + "acc,exam_id__UNICAMP_2021_2": 0.6274509803921569, + "acc,exam_id__UNICAMP_2022": 0.5641025641025641, + "acc,exam_id__USP_2018": 0.5185185185185185, + "acc,exam_id__USP_2024": 0.8292682926829268, + "acc,exam_id__UNICAMP_2019": 0.58, + "acc,exam_id__UNICAMP_2020": 0.6, + "acc,exam_id__UNICAMP_2018": 0.5740740740740741, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6934919524142757, + "acc,exam_id__2016": 0.6776859504132231, + "acc,exam_id__2013": 0.6574074074074074, + "acc,exam_id__2011": 0.7521367521367521, + "acc,exam_id__2009": 0.7043478260869566, + "acc,exam_id__2016_2": 0.6422764227642277, + "acc,exam_id__2022": 0.631578947368421, + "acc,exam_id__2014": 0.6788990825688074, + "acc,exam_id__2015": 0.6890756302521008, + "acc,exam_id__2017": 0.7327586206896551, + "acc,exam_id__2010": 0.717948717948718, + "acc,exam_id__2012": 0.7155172413793104, + "acc,exam_id__2023": 0.725925925925926 + }, + "faquad_nli": { + "f1_macro,all": 0.7755116879339805, + "acc,all": 0.8246153846153846, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8180089679918182, + "acc,all": 0.8221428571428572 + }, + "oab_exams": { + "acc,all": 0.46560364464692483, + "acc,exam_id__2011-05": 0.5, + "acc,exam_id__2016-21": 0.4625, + "acc,exam_id__2017-22": 0.5, + "acc,exam_id__2013-11": 0.4625, + "acc,exam_id__2018-25": 0.45, + "acc,exam_id__2012-09": 0.3116883116883117, + "acc,exam_id__2014-13": 0.4125, + "acc,exam_id__2015-17": 0.5897435897435898, + "acc,exam_id__2017-24": 0.475, + "acc,exam_id__2014-15": 0.5897435897435898, + "acc,exam_id__2017-23": 0.5, + "acc,exam_id__2011-03": 0.37373737373737376, + "acc,exam_id__2012-07": 0.4, + "acc,exam_id__2012-08": 0.5125, + "acc,exam_id__2012-06a": 0.4625, + "acc,exam_id__2010-01": 0.3058823529411765, + "acc,exam_id__2015-16": 0.45, + "acc,exam_id__2016-19": 0.47435897435897434, + "acc,exam_id__2015-18": 0.525, + "acc,exam_id__2012-06": 0.4, + "acc,exam_id__2014-14": 0.55, + "acc,exam_id__2013-10": 0.5, + 
"acc,exam_id__2013-12": 0.5375, + "acc,exam_id__2016-20": 0.525, + "acc,exam_id__2011-04": 0.5, + "acc,exam_id__2010-02": 0.46, + "acc,exam_id__2016-20a": 0.375, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7202839396628216, + "acc,all": 0.7497062279670975 + }, + "tweetsentbr": { + "f1_macro,all": 0.7072715545336767, + "acc,all": 0.736318407960199, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "476cd2a6d938bddb38dfbeb4cb21e3e34303413d", - "model_dtype": "torch.float16", - "model_memory_footprint": 22268366848, - "model_num_parameters": 10731524096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1502.7455065359477, - "min_seq_length": 1479, - "max_seq_length": 1569, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1711.7455065359477, - "min_seq_length": 1688, - "max_seq_length": 1778, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1756.9262865090404, - "min_seq_length": 1380, - "max_seq_length": 2557, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1657.039188243527, - "min_seq_length": 1391, - "max_seq_length": 2655, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1727.9876923076922, - "min_seq_length": 1672, - "max_seq_length": 1848, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "476cd2a6d938bddb38dfbeb4cb21e3e34303413d", + "model_dtype": "torch.float16", + "model_memory_footprint": 22268366848, + "model_num_parameters": 10731524096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1518.3878571428572, - "min_seq_length": 1495, - "max_seq_length": 1769, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1402.764464692483, - "min_seq_length": 1136, - "max_seq_length": 1905, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1502.7455065359477, + "min_seq_length": 1479, + "max_seq_length": 1569, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1711.7455065359477, + "min_seq_length": 1688, + "max_seq_length": 1778, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1756.9262865090404, + "min_seq_length": 1380, + "max_seq_length": 2557, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1657.039188243527, + "min_seq_length": 1391, + "max_seq_length": 
2655, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1727.9876923076922, + "min_seq_length": 1672, + "max_seq_length": 1848, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1518.3878571428572, + "min_seq_length": 1495, + "max_seq_length": 1769, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1402.764464692483, + "min_seq_length": 1136, + "max_seq_length": 1905, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2019.3360752056403, + "min_seq_length": 1984, + "max_seq_length": 2058, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1765.2492537313433, + "min_seq_length": 1744, + "max_seq_length": 1860, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2019.3360752056403, - "min_seq_length": 1984, - "max_seq_length": 2058, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=vicgalle/CarbonBeagle-11B-truthy,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1765.2492537313433, - "min_seq_length": 1744, - "max_seq_length": 1860, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=vicgalle/CarbonBeagle-11B-truthy,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - 
"bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/vicgalle/CarbonBeagle-11B-truthy/results_2024-05-20T00-40-47.200785.json b/vicgalle/CarbonBeagle-11B-truthy/results_2024-05-20T00-40-47.200785.json index 17c6f126cf0b9b89509f250bf09c3381cddb0ab5..abfe81b2cf1dd1fd051ffae9a6777872e371bf2f 100644 --- a/vicgalle/CarbonBeagle-11B-truthy/results_2024-05-20T00-40-47.200785.json +++ b/vicgalle/CarbonBeagle-11B-truthy/results_2024-05-20T00-40-47.200785.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.7045638011191863, - "all_grouped_npm": 0.5598610120263747, + "all_grouped_average": 0.7242102331895661, + "all_grouped_npm": 0.5890967740358686, "all_grouped": { "enem_challenge": 0.6934919524142757, "bluex": 0.6008344923504868, @@ -45,7 +45,7 @@ "faquad_nli": 0.7755116879339805, "hatebr_offensive": 0.8180089679918182, "portuguese_hate_speech": 0.7202839396628216, - "tweetsentbr": 0.5304536659002577 + "tweetsentbr": 0.7072715545336767 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6934919524142757, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7755116879339805, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8180089679918182, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7202839396628216, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5304536659002577 + "harness|tweetsentbr|tweetsentbr|None|25": 0.7072715545336767 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6934919524142757, @@ -150,9 +150,9 @@ "main_score": 0.7202839396628216 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5304536659002577, + "f1_macro,all": 0.7072715545336767, "acc,all": 0.736318407960199, - "main_score": 0.5304536659002577 + "main_score": 0.7072715545336767 } }, "config_tasks": { diff --git a/vicgalle/CarbonBeagle-11B/raw_2024-06-21T01-31-39.472689/results.json b/vicgalle/CarbonBeagle-11B/raw_2024-06-21T01-31-39.472689/results.json index f2a680a7a9fa3053dd404550628f4912f717111b..cb91f5c5760d053e075461eb2c64581e92ac73d4 100644 --- a/vicgalle/CarbonBeagle-11B/raw_2024-06-21T01-31-39.472689/results.json +++ b/vicgalle/CarbonBeagle-11B/raw_2024-06-21T01-31-39.472689/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9210937556104944, - "acc,all": 0.9211601307189542, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.8146595670108915, - "mse,all": 0.4211640539215686, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5577190542420027, - "acc,exam_id__USP_2023": 0.6136363636363636, - "acc,exam_id__UNICAMP_2018": 0.5185185185185185, - "acc,exam_id__UNICAMP_2024": 0.4888888888888889, - "acc,exam_id__UNICAMP_2022": 0.5128205128205128, - "acc,exam_id__USP_2018": 0.5, - "acc,exam_id__USP_2020": 0.5, - "acc,exam_id__UNICAMP_2023": 0.6046511627906976, - "acc,exam_id__UNICAMP_2020": 0.6, - "acc,exam_id__UNICAMP_2021_2": 0.5882352941176471, - "acc,exam_id__UNICAMP_2019": 0.58, - "acc,exam_id__USP_2024": 0.6829268292682927, - "acc,exam_id__USP_2021": 0.6153846153846154, - "acc,exam_id__USP_2019": 0.55, - "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, - "acc,exam_id__USP_2022": 0.4897959183673469, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6906927921623512, - "acc,exam_id__2015": 0.680672268907563, - "acc,exam_id__2016": 0.71900826446281, - "acc,exam_id__2009": 0.6869565217391305, - "acc,exam_id__2017": 0.7068965517241379, - 
"acc,exam_id__2013": 0.6574074074074074, - "acc,exam_id__2022": 0.6240601503759399, - "acc,exam_id__2014": 0.6788990825688074, - "acc,exam_id__2016_2": 0.6504065040650406, - "acc,exam_id__2023": 0.7333333333333333, - "acc,exam_id__2011": 0.7435897435897436, - "acc,exam_id__2012": 0.6896551724137931, - "acc,exam_id__2010": 0.717948717948718 - }, - "faquad_nli": { - "f1_macro,all": 0.751597669118611, - "acc,all": 0.7892307692307692, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8482580645161291, - "acc,all": 0.85 - }, - "oab_exams": { - "acc,all": 0.47425968109339406, - "acc,exam_id__2013-10": 0.45, - "acc,exam_id__2016-21": 0.45, - "acc,exam_id__2012-06": 0.4625, - "acc,exam_id__2015-18": 0.55, - "acc,exam_id__2016-20": 0.55, - "acc,exam_id__2017-23": 0.5, - "acc,exam_id__2010-01": 0.27058823529411763, - "acc,exam_id__2012-08": 0.5375, - "acc,exam_id__2017-22": 0.5, - "acc,exam_id__2012-06a": 0.45, - "acc,exam_id__2011-03": 0.3838383838383838, - "acc,exam_id__2014-15": 0.6025641025641025, - "acc,exam_id__2016-20a": 0.4, - "acc,exam_id__2012-07": 0.4125, - "acc,exam_id__2014-14": 0.5125, - "acc,exam_id__2018-25": 0.45, - "acc,exam_id__2010-02": 0.47, - "acc,exam_id__2011-05": 0.5125, - "acc,exam_id__2013-11": 0.475, - "acc,exam_id__2015-17": 0.6410256410256411, - "acc,exam_id__2016-19": 0.5128205128205128, - "acc,exam_id__2017-24": 0.525, - "acc,exam_id__2013-12": 0.5375, - "acc,exam_id__2015-16": 0.4625, - "acc,exam_id__2011-04": 0.4375, - "acc,exam_id__2014-13": 0.425, - "acc,exam_id__2012-09": 0.36363636363636365, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6870704747394097, - "acc,all": 0.7050528789659224 - }, - "tweetsentbr": { - "f1_macro,all": 0.5220275792806917, - "acc,all": 0.7328358208955223, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9210937556104944, + "acc,all": 0.9211601307189542, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.8146595670108915, + "mse,all": 0.4211640539215686, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5577190542420027, + "acc,exam_id__USP_2023": 0.6136363636363636, + "acc,exam_id__UNICAMP_2018": 0.5185185185185185, + "acc,exam_id__UNICAMP_2024": 0.4888888888888889, + "acc,exam_id__UNICAMP_2022": 0.5128205128205128, + "acc,exam_id__USP_2018": 0.5, + "acc,exam_id__USP_2020": 0.5, + "acc,exam_id__UNICAMP_2023": 0.6046511627906976, + "acc,exam_id__UNICAMP_2020": 0.6, + "acc,exam_id__UNICAMP_2021_2": 0.5882352941176471, + "acc,exam_id__UNICAMP_2019": 0.58, + "acc,exam_id__USP_2024": 0.6829268292682927, + "acc,exam_id__USP_2021": 0.6153846153846154, + "acc,exam_id__USP_2019": 0.55, + "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, + "acc,exam_id__USP_2022": 0.4897959183673469, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6906927921623512, + "acc,exam_id__2015": 0.680672268907563, + "acc,exam_id__2016": 0.71900826446281, + "acc,exam_id__2009": 0.6869565217391305, + "acc,exam_id__2017": 0.7068965517241379, + "acc,exam_id__2013": 0.6574074074074074, + "acc,exam_id__2022": 0.6240601503759399, + "acc,exam_id__2014": 0.6788990825688074, + "acc,exam_id__2016_2": 0.6504065040650406, + "acc,exam_id__2023": 0.7333333333333333, + "acc,exam_id__2011": 0.7435897435897436, + "acc,exam_id__2012": 0.6896551724137931, + "acc,exam_id__2010": 0.717948717948718 + }, + "faquad_nli": { + "f1_macro,all": 0.751597669118611, + "acc,all": 0.7892307692307692, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8482580645161291, + "acc,all": 0.85 + }, + "oab_exams": { + "acc,all": 0.47425968109339406, + "acc,exam_id__2013-10": 0.45, + "acc,exam_id__2016-21": 0.45, + "acc,exam_id__2012-06": 0.4625, + "acc,exam_id__2015-18": 0.55, + "acc,exam_id__2016-20": 0.55, + "acc,exam_id__2017-23": 0.5, + "acc,exam_id__2010-01": 0.27058823529411763, + "acc,exam_id__2012-08": 0.5375, + "acc,exam_id__2017-22": 0.5, + "acc,exam_id__2012-06a": 0.45, + "acc,exam_id__2011-03": 0.3838383838383838, + "acc,exam_id__2014-15": 0.6025641025641025, + "acc,exam_id__2016-20a": 0.4, + "acc,exam_id__2012-07": 0.4125, + "acc,exam_id__2014-14": 0.5125, + "acc,exam_id__2018-25": 0.45, + "acc,exam_id__2010-02": 0.47, + "acc,exam_id__2011-05": 0.5125, + "acc,exam_id__2013-11": 0.475, + "acc,exam_id__2015-17": 0.6410256410256411, + "acc,exam_id__2016-19": 0.5128205128205128, + "acc,exam_id__2017-24": 0.525, + "acc,exam_id__2013-12": 0.5375, + 
"acc,exam_id__2015-16": 0.4625, + "acc,exam_id__2011-04": 0.4375, + "acc,exam_id__2014-13": 0.425, + "acc,exam_id__2012-09": 0.36363636363636365, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6870704747394097, + "acc,all": 0.7050528789659224 + }, + "tweetsentbr": { + "f1_macro,all": 0.6960367723742555, + "acc,all": 0.7328358208955223, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "3fe9bf5327606d013b182fed17a472f5f043759b", - "model_dtype": "torch.float16", - "model_memory_footprint": 22268366848, - "model_num_parameters": 10731524096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 8, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1502.7455065359477, - "min_seq_length": 1479, - "max_seq_length": 1569, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1711.7455065359477, - "min_seq_length": 1688, - "max_seq_length": 1778, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1756.9262865090404, - "min_seq_length": 1380, - "max_seq_length": 2557, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1657.039188243527, - "min_seq_length": 1391, - "max_seq_length": 2655, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1727.9876923076922, - "min_seq_length": 1672, - "max_seq_length": 1848, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "3fe9bf5327606d013b182fed17a472f5f043759b", + "model_dtype": "torch.float16", + "model_memory_footprint": 22268366848, + "model_num_parameters": 10731524096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 8, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1518.3878571428572, - "min_seq_length": 1495, - "max_seq_length": 1769, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1402.764464692483, - "min_seq_length": 1136, - "max_seq_length": 1905, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1502.7455065359477, + "min_seq_length": 1479, + "max_seq_length": 1569, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1711.7455065359477, + "min_seq_length": 1688, + "max_seq_length": 1778, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1756.9262865090404, + "min_seq_length": 1380, + "max_seq_length": 2557, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1657.039188243527, + "min_seq_length": 1391, + "max_seq_length": 
2655, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1727.9876923076922, + "min_seq_length": 1672, + "max_seq_length": 1848, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1518.3878571428572, + "min_seq_length": 1495, + "max_seq_length": 1769, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1402.764464692483, + "min_seq_length": 1136, + "max_seq_length": 1905, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2019.3360752056403, + "min_seq_length": 1984, + "max_seq_length": 2058, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1765.2492537313433, + "min_seq_length": 1744, + "max_seq_length": 1860, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2019.3360752056403, - "min_seq_length": 1984, - "max_seq_length": 2058, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=vicgalle/CarbonBeagle-11B,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1765.2492537313433, - "min_seq_length": 1744, - "max_seq_length": 1860, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=vicgalle/CarbonBeagle-11B,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, 
- "gen_kwargs": null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/vicgalle/CarbonBeagle-11B/results_2024-06-21T01-31-39.472689.json b/vicgalle/CarbonBeagle-11B/results_2024-06-21T01-31-39.472689.json index e1a9d3148cdc6f81434a7a6d805fa76beb42a68f..7f292886644cc680031b14b4d08a6cad93e22852 100644 --- a/vicgalle/CarbonBeagle-11B/results_2024-06-21T01-31-39.472689.json +++ b/vicgalle/CarbonBeagle-11B/results_2024-06-21T01-31-39.472689.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6963754041971083, - "all_grouped_npm": 0.5473926792450741, + "all_grouped_average": 0.7157097589852821, + "all_grouped_npm": 0.5761640405369993, "all_grouped": { "enem_challenge": 0.6906927921623512, "bluex": 0.5577190542420027, @@ -45,7 +45,7 @@ "faquad_nli": 0.751597669118611, "hatebr_offensive": 0.8482580645161291, "portuguese_hate_speech": 0.6870704747394097, - "tweetsentbr": 0.5220275792806917 + "tweetsentbr": 0.6960367723742555 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6906927921623512, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.751597669118611, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8482580645161291, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6870704747394097, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5220275792806917 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6960367723742555 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6906927921623512, @@ -150,9 +150,9 @@ "main_score": 0.6870704747394097 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5220275792806917, + "f1_macro,all": 0.6960367723742555, "acc,all": 0.7328358208955223, - "main_score": 0.5220275792806917 + "main_score": 0.6960367723742555 } }, "config_tasks": { diff --git a/vicgalle/ConfigurableBeagle-11B/raw_2024-06-15T01-31-31.325184/results.json b/vicgalle/ConfigurableBeagle-11B/raw_2024-06-15T01-31-31.325184/results.json index 7e4f3b1dcb335cf196dda2685e694625acd9de04..c1852e0b66ac241d42261889fd11cc4c92200ca0 100644 --- a/vicgalle/ConfigurableBeagle-11B/raw_2024-06-15T01-31-31.325184/results.json +++ b/vicgalle/ConfigurableBeagle-11B/raw_2024-06-15T01-31-31.325184/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9276957767969884, - "acc,all": 0.9276960784313726, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.8158043682731361, - "mse,all": 0.397863112745098, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5897079276773296, - "acc,exam_id__USP_2022": 0.6326530612244898, - "acc,exam_id__USP_2023": 0.7045454545454546, - "acc,exam_id__UNICAMP_2018": 0.5185185185185185, - "acc,exam_id__UNICAMP_2020": 0.6363636363636364, - "acc,exam_id__UNICAMP_2022": 0.5897435897435898, - "acc,exam_id__UNICAMP_2024": 0.5777777777777777, - "acc,exam_id__UNICAMP_2019": 0.58, - "acc,exam_id__USP_2020": 0.5178571428571429, - "acc,exam_id__USP_2018": 0.5185185185185185, - "acc,exam_id__UNICAMP_2021_2": 0.5882352941176471, - "acc,exam_id__USP_2021": 0.6346153846153846, - "acc,exam_id__USP_2019": 0.55, - "acc,exam_id__UNICAMP_2021_1": 0.5, - "acc,exam_id__USP_2024": 0.7317073170731707, - "acc,exam_id__UNICAMP_2023": 0.6046511627906976, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6878936319104269, - "acc,exam_id__2015": 0.680672268907563, - "acc,exam_id__2011": 0.7008547008547008, - "acc,exam_id__2009": 0.6782608695652174, - "acc,exam_id__2012": 
0.7155172413793104, - "acc,exam_id__2016": 0.6942148760330579, - "acc,exam_id__2016_2": 0.6504065040650406, - "acc,exam_id__2017": 0.6810344827586207, - "acc,exam_id__2022": 0.6541353383458647, - "acc,exam_id__2023": 0.7037037037037037, - "acc,exam_id__2013": 0.7129629629629629, - "acc,exam_id__2010": 0.7008547008547008, - "acc,exam_id__2014": 0.6880733944954128 - }, - "faquad_nli": { - "f1_macro,all": 0.7782188011607123, - "acc,all": 0.8323076923076923, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8290936613488302, - "acc,all": 0.8321428571428572 - }, - "oab_exams": { - "acc,all": 0.475626423690205, - "acc,exam_id__2010-01": 0.32941176470588235, - "acc,exam_id__2012-08": 0.5625, - "acc,exam_id__2013-11": 0.5125, - "acc,exam_id__2011-03": 0.37373737373737376, - "acc,exam_id__2017-22": 0.525, - "acc,exam_id__2014-14": 0.45, - "acc,exam_id__2012-06a": 0.425, - "acc,exam_id__2018-25": 0.45, - "acc,exam_id__2015-16": 0.45, - "acc,exam_id__2012-06": 0.4375, - "acc,exam_id__2015-18": 0.55, - "acc,exam_id__2017-23": 0.5, - "acc,exam_id__2010-02": 0.51, - "acc,exam_id__2016-20a": 0.4, - "acc,exam_id__2011-04": 0.525, - "acc,exam_id__2017-24": 0.55, - "acc,exam_id__2011-05": 0.575, - "acc,exam_id__2016-21": 0.425, - "acc,exam_id__2014-15": 0.6025641025641025, - "acc,exam_id__2013-10": 0.4625, - "acc,exam_id__2015-17": 0.5512820512820513, - "acc,exam_id__2012-09": 0.33766233766233766, - "acc,exam_id__2014-13": 0.4, - "acc,exam_id__2016-19": 0.4358974358974359, - "acc,exam_id__2016-20": 0.5625, - "acc,exam_id__2012-07": 0.4, - "acc,exam_id__2013-12": 0.5625, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7170439243829598, - "acc,all": 0.7403055229142186 - }, - "tweetsentbr": { - "f1_macro,all": 0.5306250602382254, - "acc,all": 0.7353233830845771, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9276957767969884, + "acc,all": 0.9276960784313726, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.8158043682731361, + "mse,all": 0.397863112745098, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5897079276773296, + "acc,exam_id__USP_2022": 0.6326530612244898, + "acc,exam_id__USP_2023": 0.7045454545454546, + "acc,exam_id__UNICAMP_2018": 0.5185185185185185, + "acc,exam_id__UNICAMP_2020": 0.6363636363636364, + "acc,exam_id__UNICAMP_2022": 0.5897435897435898, + "acc,exam_id__UNICAMP_2024": 0.5777777777777777, + "acc,exam_id__UNICAMP_2019": 0.58, + "acc,exam_id__USP_2020": 0.5178571428571429, + "acc,exam_id__USP_2018": 0.5185185185185185, + "acc,exam_id__UNICAMP_2021_2": 0.5882352941176471, + "acc,exam_id__USP_2021": 0.6346153846153846, + "acc,exam_id__USP_2019": 0.55, + "acc,exam_id__UNICAMP_2021_1": 0.5, + "acc,exam_id__USP_2024": 0.7317073170731707, + "acc,exam_id__UNICAMP_2023": 0.6046511627906976, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6878936319104269, + "acc,exam_id__2015": 0.680672268907563, + "acc,exam_id__2011": 0.7008547008547008, + "acc,exam_id__2009": 0.6782608695652174, + "acc,exam_id__2012": 0.7155172413793104, + "acc,exam_id__2016": 0.6942148760330579, + "acc,exam_id__2016_2": 0.6504065040650406, + "acc,exam_id__2017": 0.6810344827586207, + "acc,exam_id__2022": 0.6541353383458647, + "acc,exam_id__2023": 0.7037037037037037, + "acc,exam_id__2013": 0.7129629629629629, + "acc,exam_id__2010": 0.7008547008547008, + "acc,exam_id__2014": 0.6880733944954128 + }, + "faquad_nli": { + "f1_macro,all": 0.7782188011607123, + "acc,all": 0.8323076923076923, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8290936613488302, + "acc,all": 0.8321428571428572 + }, + "oab_exams": { + "acc,all": 0.475626423690205, + "acc,exam_id__2010-01": 0.32941176470588235, + "acc,exam_id__2012-08": 0.5625, + "acc,exam_id__2013-11": 0.5125, + "acc,exam_id__2011-03": 0.37373737373737376, + "acc,exam_id__2017-22": 0.525, + "acc,exam_id__2014-14": 0.45, + "acc,exam_id__2012-06a": 0.425, + "acc,exam_id__2018-25": 0.45, + "acc,exam_id__2015-16": 0.45, + "acc,exam_id__2012-06": 0.4375, + "acc,exam_id__2015-18": 0.55, + "acc,exam_id__2017-23": 0.5, + "acc,exam_id__2010-02": 0.51, + "acc,exam_id__2016-20a": 0.4, + "acc,exam_id__2011-04": 0.525, + "acc,exam_id__2017-24": 0.55, + "acc,exam_id__2011-05": 0.575, + "acc,exam_id__2016-21": 0.425, + "acc,exam_id__2014-15": 0.6025641025641025, + "acc,exam_id__2013-10": 0.4625, + "acc,exam_id__2015-17": 0.5512820512820513, + "acc,exam_id__2012-09": 
0.33766233766233766, + "acc,exam_id__2014-13": 0.4, + "acc,exam_id__2016-19": 0.4358974358974359, + "acc,exam_id__2016-20": 0.5625, + "acc,exam_id__2012-07": 0.4, + "acc,exam_id__2013-12": 0.5625, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7170439243829598, + "acc,all": 0.7403055229142186 + }, + "tweetsentbr": { + "f1_macro,all": 0.7075000803176339, + "acc,all": 0.7353233830845771, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "bbc16dbf94b8e8a99bb3e2ada6755faf9c2990dd", - "model_dtype": "torch.float16", - "model_memory_footprint": 22268366848, - "model_num_parameters": 10731524096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1502.7455065359477, - "min_seq_length": 1479, - "max_seq_length": 1569, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1711.7455065359477, - "min_seq_length": 1688, - "max_seq_length": 1778, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1756.9262865090404, - "min_seq_length": 1380, - "max_seq_length": 2557, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1657.039188243527, - "min_seq_length": 1391, - "max_seq_length": 2655, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1727.9876923076922, - "min_seq_length": 1672, - "max_seq_length": 1848, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "bbc16dbf94b8e8a99bb3e2ada6755faf9c2990dd", + "model_dtype": "torch.float16", + "model_memory_footprint": 22268366848, + "model_num_parameters": 10731524096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1518.3878571428572, - "min_seq_length": 1495, - "max_seq_length": 1769, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1402.764464692483, - "min_seq_length": 1136, - "max_seq_length": 1905, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1502.7455065359477, + "min_seq_length": 1479, + "max_seq_length": 1569, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1711.7455065359477, + "min_seq_length": 1688, + "max_seq_length": 1778, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1756.9262865090404, + "min_seq_length": 1380, + "max_seq_length": 2557, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1657.039188243527, + "min_seq_length": 1391, + 
"max_seq_length": 2655, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1727.9876923076922, + "min_seq_length": 1672, + "max_seq_length": 1848, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1518.3878571428572, + "min_seq_length": 1495, + "max_seq_length": 1769, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1402.764464692483, + "min_seq_length": 1136, + "max_seq_length": 1905, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2019.3360752056403, + "min_seq_length": 1984, + "max_seq_length": 2058, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1765.2492537313433, + "min_seq_length": 1744, + "max_seq_length": 1860, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2019.3360752056403, - "min_seq_length": 1984, - "max_seq_length": 2058, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=vicgalle/ConfigurableBeagle-11B,dtype=float16,device=cuda:0,revision=bbc16dbf94b8e8a99bb3e2ada6755faf9c2990dd,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1765.2492537313433, - "min_seq_length": 1744, - "max_seq_length": 1860, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=vicgalle/ConfigurableBeagle-11B,dtype=float16,device=cuda:0,revision=bbc16dbf94b8e8a99bb3e2ada6755faf9c2990dd,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": 
[ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "2d67fba" + "git_hash": "2d67fba" } \ No newline at end of file diff --git a/vicgalle/ConfigurableBeagle-11B/raw_2024-06-15T01-31-31.737501/results.json b/vicgalle/ConfigurableBeagle-11B/raw_2024-06-15T01-31-31.737501/results.json index ea22c620d4983055e4429533d43cc14f6eff4ee1..7ce3b25f74ce4a2bbc23607c5f7f2bb857e91f17 100644 --- a/vicgalle/ConfigurableBeagle-11B/raw_2024-06-15T01-31-31.737501/results.json +++ b/vicgalle/ConfigurableBeagle-11B/raw_2024-06-15T01-31-31.737501/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9276957767969884, - "acc,all": 0.9276960784313726, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.8158043682731361, - "mse,all": 0.397863112745098, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5897079276773296, - "acc,exam_id__UNICAMP_2022": 0.5897435897435898, - "acc,exam_id__UNICAMP_2024": 0.5777777777777777, - "acc,exam_id__USP_2020": 0.5178571428571429, - "acc,exam_id__UNICAMP_2019": 0.58, - "acc,exam_id__USP_2023": 0.7045454545454546, - "acc,exam_id__USP_2021": 0.6346153846153846, - "acc,exam_id__USP_2018": 0.5185185185185185, - "acc,exam_id__USP_2019": 0.55, - "acc,exam_id__USP_2022": 0.6326530612244898, - "acc,exam_id__UNICAMP_2021_1": 0.5, - "acc,exam_id__UNICAMP_2020": 0.6363636363636364, - "acc,exam_id__UNICAMP_2018": 0.5185185185185185, - "acc,exam_id__UNICAMP_2023": 0.6046511627906976, - "acc,exam_id__USP_2024": 0.7317073170731707, - "acc,exam_id__UNICAMP_2021_2": 0.5882352941176471, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6878936319104269, - "acc,exam_id__2015": 0.680672268907563, - "acc,exam_id__2017": 0.6810344827586207, - "acc,exam_id__2023": 0.7037037037037037, - "acc,exam_id__2013": 0.7129629629629629, - "acc,exam_id__2014": 0.6880733944954128, - "acc,exam_id__2022": 0.6541353383458647, - "acc,exam_id__2011": 0.7008547008547008, - "acc,exam_id__2010": 0.7008547008547008, - "acc,exam_id__2016": 0.6942148760330579, - "acc,exam_id__2009": 0.6782608695652174, - "acc,exam_id__2016_2": 0.6504065040650406, - "acc,exam_id__2012": 0.7155172413793104 - }, - "faquad_nli": { - "f1_macro,all": 0.7782188011607123, - "acc,all": 0.8323076923076923, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8290936613488302, - "acc,all": 0.8321428571428572 - }, - "oab_exams": { - "acc,all": 0.475626423690205, - "acc,exam_id__2016-19": 0.4358974358974359, - "acc,exam_id__2018-25": 0.45, - "acc,exam_id__2010-01": 0.32941176470588235, - "acc,exam_id__2011-04": 0.525, - "acc,exam_id__2016-21": 0.425, - "acc,exam_id__2015-16": 0.45, - "acc,exam_id__2017-22": 0.525, - "acc,exam_id__2012-07": 0.4, - "acc,exam_id__2015-17": 0.5512820512820513, - "acc,exam_id__2013-12": 0.5625, - "acc,exam_id__2012-06a": 0.425, - "acc,exam_id__2012-06": 0.4375, - "acc,exam_id__2014-14": 0.45, - "acc,exam_id__2014-15": 0.6025641025641025, - "acc,exam_id__2013-10": 0.4625, - "acc,exam_id__2010-02": 0.51, - "acc,exam_id__2017-23": 0.5, - "acc,exam_id__2013-11": 0.5125, - "acc,exam_id__2011-05": 0.575, - "acc,exam_id__2012-08": 0.5625, - "acc,exam_id__2014-13": 0.4, - "acc,exam_id__2016-20": 0.5625, - "acc,exam_id__2017-24": 0.55, - "acc,exam_id__2011-03": 0.37373737373737376, - "acc,exam_id__2012-09": 0.33766233766233766, - "acc,exam_id__2015-18": 0.55, - "acc,exam_id__2016-20a": 0.4, - "alias": 
"oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7170439243829598, - "acc,all": 0.7403055229142186 - }, - "tweetsentbr": { - "f1_macro,all": 0.5306250602382254, - "acc,all": 0.7353233830845771, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9276957767969884, + "acc,all": 0.9276960784313726, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.8158043682731361, + "mse,all": 0.397863112745098, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5897079276773296, + "acc,exam_id__UNICAMP_2022": 0.5897435897435898, + "acc,exam_id__UNICAMP_2024": 0.5777777777777777, + "acc,exam_id__USP_2020": 0.5178571428571429, + "acc,exam_id__UNICAMP_2019": 0.58, + "acc,exam_id__USP_2023": 0.7045454545454546, + "acc,exam_id__USP_2021": 0.6346153846153846, + "acc,exam_id__USP_2018": 0.5185185185185185, + "acc,exam_id__USP_2019": 0.55, + "acc,exam_id__USP_2022": 0.6326530612244898, + "acc,exam_id__UNICAMP_2021_1": 0.5, + "acc,exam_id__UNICAMP_2020": 0.6363636363636364, + "acc,exam_id__UNICAMP_2018": 0.5185185185185185, + "acc,exam_id__UNICAMP_2023": 0.6046511627906976, + "acc,exam_id__USP_2024": 0.7317073170731707, + "acc,exam_id__UNICAMP_2021_2": 0.5882352941176471, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6878936319104269, + "acc,exam_id__2015": 0.680672268907563, + "acc,exam_id__2017": 0.6810344827586207, + "acc,exam_id__2023": 0.7037037037037037, + "acc,exam_id__2013": 0.7129629629629629, + "acc,exam_id__2014": 0.6880733944954128, + "acc,exam_id__2022": 0.6541353383458647, + "acc,exam_id__2011": 0.7008547008547008, + "acc,exam_id__2010": 0.7008547008547008, + "acc,exam_id__2016": 0.6942148760330579, + "acc,exam_id__2009": 0.6782608695652174, + "acc,exam_id__2016_2": 0.6504065040650406, + "acc,exam_id__2012": 0.7155172413793104 + }, + "faquad_nli": { + "f1_macro,all": 0.7782188011607123, + "acc,all": 0.8323076923076923, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8290936613488302, + "acc,all": 0.8321428571428572 + }, + "oab_exams": { + "acc,all": 0.475626423690205, + "acc,exam_id__2016-19": 
0.4358974358974359, + "acc,exam_id__2018-25": 0.45, + "acc,exam_id__2010-01": 0.32941176470588235, + "acc,exam_id__2011-04": 0.525, + "acc,exam_id__2016-21": 0.425, + "acc,exam_id__2015-16": 0.45, + "acc,exam_id__2017-22": 0.525, + "acc,exam_id__2012-07": 0.4, + "acc,exam_id__2015-17": 0.5512820512820513, + "acc,exam_id__2013-12": 0.5625, + "acc,exam_id__2012-06a": 0.425, + "acc,exam_id__2012-06": 0.4375, + "acc,exam_id__2014-14": 0.45, + "acc,exam_id__2014-15": 0.6025641025641025, + "acc,exam_id__2013-10": 0.4625, + "acc,exam_id__2010-02": 0.51, + "acc,exam_id__2017-23": 0.5, + "acc,exam_id__2013-11": 0.5125, + "acc,exam_id__2011-05": 0.575, + "acc,exam_id__2012-08": 0.5625, + "acc,exam_id__2014-13": 0.4, + "acc,exam_id__2016-20": 0.5625, + "acc,exam_id__2017-24": 0.55, + "acc,exam_id__2011-03": 0.37373737373737376, + "acc,exam_id__2012-09": 0.33766233766233766, + "acc,exam_id__2015-18": 0.55, + "acc,exam_id__2016-20a": 0.4, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7170439243829598, + "acc,all": 0.7403055229142186 + }, + "tweetsentbr": { + "f1_macro,all": 0.7075000803176339, + "acc,all": 0.7353233830845771, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "bbc16dbf94b8e8a99bb3e2ada6755faf9c2990dd", - "model_dtype": "torch.float16", - "model_memory_footprint": 22268366848, - "model_num_parameters": 10731524096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1502.7455065359477, - "min_seq_length": 1479, - "max_seq_length": 1569, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1711.7455065359477, - "min_seq_length": 1688, - "max_seq_length": 1778, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1756.9262865090404, - "min_seq_length": 1380, - "max_seq_length": 2557, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1657.039188243527, - "min_seq_length": 1391, - "max_seq_length": 2655, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1727.9876923076922, - "min_seq_length": 1672, - "max_seq_length": 1848, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "bbc16dbf94b8e8a99bb3e2ada6755faf9c2990dd", + "model_dtype": "torch.float16", + "model_memory_footprint": 22268366848, + "model_num_parameters": 10731524096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1518.3878571428572, - "min_seq_length": 1495, - "max_seq_length": 1769, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1402.764464692483, - "min_seq_length": 1136, - "max_seq_length": 1905, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1502.7455065359477, + "min_seq_length": 1479, + "max_seq_length": 1569, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1711.7455065359477, + "min_seq_length": 1688, + "max_seq_length": 1778, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1756.9262865090404, + "min_seq_length": 1380, + "max_seq_length": 2557, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1657.039188243527, + "min_seq_length": 1391, + 
"max_seq_length": 2655, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1727.9876923076922, + "min_seq_length": 1672, + "max_seq_length": 1848, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1518.3878571428572, + "min_seq_length": 1495, + "max_seq_length": 1769, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1402.764464692483, + "min_seq_length": 1136, + "max_seq_length": 1905, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2019.3360752056403, + "min_seq_length": 1984, + "max_seq_length": 2058, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1765.2492537313433, + "min_seq_length": 1744, + "max_seq_length": 1860, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2019.3360752056403, - "min_seq_length": 1984, - "max_seq_length": 2058, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=vicgalle/ConfigurableBeagle-11B,dtype=float16,device=cuda:0,revision=bbc16dbf94b8e8a99bb3e2ada6755faf9c2990dd,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1765.2492537313433, - "min_seq_length": 1744, - "max_seq_length": 1860, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=vicgalle/ConfigurableBeagle-11B,dtype=float16,device=cuda:0,revision=bbc16dbf94b8e8a99bb3e2ada6755faf9c2990dd,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": 
[ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "2d67fba" + "git_hash": "2d67fba" } \ No newline at end of file diff --git a/vicgalle/ConfigurableBeagle-11B/raw_2024-06-16T01-31-29.633353/results.json b/vicgalle/ConfigurableBeagle-11B/raw_2024-06-16T01-31-29.633353/results.json index 4fd0b3ad1fb8cf2f2c470d649e3563e33b362acc..52c4202e1f3a66a736544a630e7522d852ad13c9 100644 --- a/vicgalle/ConfigurableBeagle-11B/raw_2024-06-16T01-31-29.633353/results.json +++ b/vicgalle/ConfigurableBeagle-11B/raw_2024-06-16T01-31-29.633353/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9276957767969884, - "acc,all": 0.9276960784313726, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.8158043682731361, - "mse,all": 0.397863112745098, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5897079276773296, - "acc,exam_id__UNICAMP_2021_1": 0.5, - "acc,exam_id__USP_2023": 0.7045454545454546, - "acc,exam_id__UNICAMP_2023": 0.6046511627906976, - "acc,exam_id__UNICAMP_2021_2": 0.5882352941176471, - "acc,exam_id__USP_2024": 0.7317073170731707, - "acc,exam_id__UNICAMP_2024": 0.5777777777777777, - "acc,exam_id__USP_2021": 0.6346153846153846, - "acc,exam_id__UNICAMP_2018": 0.5185185185185185, - "acc,exam_id__USP_2018": 0.5185185185185185, - "acc,exam_id__UNICAMP_2019": 0.58, - "acc,exam_id__USP_2022": 0.6326530612244898, - "acc,exam_id__UNICAMP_2020": 0.6363636363636364, - "acc,exam_id__UNICAMP_2022": 0.5897435897435898, - "acc,exam_id__USP_2020": 0.5178571428571429, - "acc,exam_id__USP_2019": 0.55, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6878936319104269, - "acc,exam_id__2010": 0.7008547008547008, - "acc,exam_id__2011": 0.7008547008547008, - "acc,exam_id__2023": 0.7037037037037037, - "acc,exam_id__2012": 0.7155172413793104, - "acc,exam_id__2016": 0.6942148760330579, - "acc,exam_id__2016_2": 0.6504065040650406, - "acc,exam_id__2017": 0.6810344827586207, - "acc,exam_id__2022": 0.6541353383458647, - "acc,exam_id__2013": 0.7129629629629629, - "acc,exam_id__2014": 0.6880733944954128, - "acc,exam_id__2009": 0.6782608695652174, - "acc,exam_id__2015": 0.680672268907563 - }, - "faquad_nli": { - "f1_macro,all": 0.7782188011607123, - "acc,all": 0.8323076923076923, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8290936613488302, - "acc,all": 0.8321428571428572 - }, - "oab_exams": { - "acc,all": 0.475626423690205, - "acc,exam_id__2016-19": 0.4358974358974359, - "acc,exam_id__2012-09": 0.33766233766233766, - "acc,exam_id__2016-20a": 0.4, - "acc,exam_id__2013-12": 0.5625, - "acc,exam_id__2011-04": 0.525, - "acc,exam_id__2010-01": 0.32941176470588235, - "acc,exam_id__2015-18": 0.55, - "acc,exam_id__2011-05": 0.575, - "acc,exam_id__2015-17": 0.5512820512820513, - "acc,exam_id__2015-16": 0.45, - "acc,exam_id__2012-06a": 0.425, - "acc,exam_id__2013-10": 0.4625, - "acc,exam_id__2016-21": 0.425, - "acc,exam_id__2013-11": 0.5125, - "acc,exam_id__2012-06": 0.4375, - "acc,exam_id__2018-25": 0.45, - "acc,exam_id__2011-03": 0.37373737373737376, - "acc,exam_id__2014-13": 0.4, - "acc,exam_id__2012-07": 0.4, - "acc,exam_id__2014-15": 0.6025641025641025, - "acc,exam_id__2012-08": 0.5625, - "acc,exam_id__2010-02": 0.51, - "acc,exam_id__2014-14": 0.45, - "acc,exam_id__2016-20": 0.5625, - "acc,exam_id__2017-22": 0.525, - "acc,exam_id__2017-23": 0.5, - "acc,exam_id__2017-24": 0.55, - "alias": 
"oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.7170439243829598, - "acc,all": 0.7403055229142186 - }, - "tweetsentbr": { - "f1_macro,all": 0.5306250602382254, - "acc,all": 0.7353233830845771, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9276957767969884, + "acc,all": 0.9276960784313726, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.8158043682731361, + "mse,all": 0.397863112745098, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5897079276773296, + "acc,exam_id__UNICAMP_2021_1": 0.5, + "acc,exam_id__USP_2023": 0.7045454545454546, + "acc,exam_id__UNICAMP_2023": 0.6046511627906976, + "acc,exam_id__UNICAMP_2021_2": 0.5882352941176471, + "acc,exam_id__USP_2024": 0.7317073170731707, + "acc,exam_id__UNICAMP_2024": 0.5777777777777777, + "acc,exam_id__USP_2021": 0.6346153846153846, + "acc,exam_id__UNICAMP_2018": 0.5185185185185185, + "acc,exam_id__USP_2018": 0.5185185185185185, + "acc,exam_id__UNICAMP_2019": 0.58, + "acc,exam_id__USP_2022": 0.6326530612244898, + "acc,exam_id__UNICAMP_2020": 0.6363636363636364, + "acc,exam_id__UNICAMP_2022": 0.5897435897435898, + "acc,exam_id__USP_2020": 0.5178571428571429, + "acc,exam_id__USP_2019": 0.55, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6878936319104269, + "acc,exam_id__2010": 0.7008547008547008, + "acc,exam_id__2011": 0.7008547008547008, + "acc,exam_id__2023": 0.7037037037037037, + "acc,exam_id__2012": 0.7155172413793104, + "acc,exam_id__2016": 0.6942148760330579, + "acc,exam_id__2016_2": 0.6504065040650406, + "acc,exam_id__2017": 0.6810344827586207, + "acc,exam_id__2022": 0.6541353383458647, + "acc,exam_id__2013": 0.7129629629629629, + "acc,exam_id__2014": 0.6880733944954128, + "acc,exam_id__2009": 0.6782608695652174, + "acc,exam_id__2015": 0.680672268907563 + }, + "faquad_nli": { + "f1_macro,all": 0.7782188011607123, + "acc,all": 0.8323076923076923, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8290936613488302, + "acc,all": 0.8321428571428572 + }, + "oab_exams": { + "acc,all": 0.475626423690205, + "acc,exam_id__2016-19": 
0.4358974358974359, + "acc,exam_id__2012-09": 0.33766233766233766, + "acc,exam_id__2016-20a": 0.4, + "acc,exam_id__2013-12": 0.5625, + "acc,exam_id__2011-04": 0.525, + "acc,exam_id__2010-01": 0.32941176470588235, + "acc,exam_id__2015-18": 0.55, + "acc,exam_id__2011-05": 0.575, + "acc,exam_id__2015-17": 0.5512820512820513, + "acc,exam_id__2015-16": 0.45, + "acc,exam_id__2012-06a": 0.425, + "acc,exam_id__2013-10": 0.4625, + "acc,exam_id__2016-21": 0.425, + "acc,exam_id__2013-11": 0.5125, + "acc,exam_id__2012-06": 0.4375, + "acc,exam_id__2018-25": 0.45, + "acc,exam_id__2011-03": 0.37373737373737376, + "acc,exam_id__2014-13": 0.4, + "acc,exam_id__2012-07": 0.4, + "acc,exam_id__2014-15": 0.6025641025641025, + "acc,exam_id__2012-08": 0.5625, + "acc,exam_id__2010-02": 0.51, + "acc,exam_id__2014-14": 0.45, + "acc,exam_id__2016-20": 0.5625, + "acc,exam_id__2017-22": 0.525, + "acc,exam_id__2017-23": 0.5, + "acc,exam_id__2017-24": 0.55, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7170439243829598, + "acc,all": 0.7403055229142186 + }, + "tweetsentbr": { + "f1_macro,all": 0.7075000803176339, + "acc,all": 0.7353233830845771, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "bbc16dbf94b8e8a99bb3e2ada6755faf9c2990dd", - "model_dtype": "torch.float16", - "model_memory_footprint": 22268366848, - "model_num_parameters": 10731524096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1502.7455065359477, - "min_seq_length": 1479, - "max_seq_length": 1569, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1711.7455065359477, - "min_seq_length": 1688, - "max_seq_length": 1778, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1756.9262865090404, - "min_seq_length": 1380, - "max_seq_length": 2557, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1657.039188243527, - "min_seq_length": 1391, - "max_seq_length": 2655, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1727.9876923076922, - "min_seq_length": 1672, - "max_seq_length": 1848, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "bbc16dbf94b8e8a99bb3e2ada6755faf9c2990dd", + "model_dtype": "torch.float16", + "model_memory_footprint": 22268366848, + "model_num_parameters": 10731524096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1518.3878571428572, - "min_seq_length": 1495, - "max_seq_length": 1769, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1402.764464692483, - "min_seq_length": 1136, - "max_seq_length": 1905, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1502.7455065359477, + "min_seq_length": 1479, + "max_seq_length": 1569, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1711.7455065359477, + "min_seq_length": 1688, + "max_seq_length": 1778, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1756.9262865090404, + "min_seq_length": 1380, + "max_seq_length": 2557, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1657.039188243527, + "min_seq_length": 1391, + 
"max_seq_length": 2655, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1727.9876923076922, + "min_seq_length": 1672, + "max_seq_length": 1848, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1518.3878571428572, + "min_seq_length": 1495, + "max_seq_length": 1769, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1402.764464692483, + "min_seq_length": 1136, + "max_seq_length": 1905, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2019.3360752056403, + "min_seq_length": 1984, + "max_seq_length": 2058, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1765.2492537313433, + "min_seq_length": 1744, + "max_seq_length": 1860, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2019.3360752056403, - "min_seq_length": 1984, - "max_seq_length": 2058, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=vicgalle/ConfigurableBeagle-11B,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1765.2492537313433, - "min_seq_length": 1744, - "max_seq_length": 1860, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=vicgalle/ConfigurableBeagle-11B,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - 
null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "2d67fba" + "git_hash": "2d67fba" } \ No newline at end of file diff --git a/vicgalle/ConfigurableBeagle-11B/raw_2024-06-16T01-31-29.648962/results.json b/vicgalle/ConfigurableBeagle-11B/raw_2024-06-16T01-31-29.648962/results.json index 03d0d1cf685b623f83662a428d7de9bc6330587d..0fb29db6bc9d20404bc23bcbe1175681cf15b3dc 100644 --- a/vicgalle/ConfigurableBeagle-11B/raw_2024-06-16T01-31-29.648962/results.json +++ b/vicgalle/ConfigurableBeagle-11B/raw_2024-06-16T01-31-29.648962/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9276957767969884, - "acc,all": 0.9276960784313726, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.8158043682731361, - "mse,all": 0.397863112745098, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5897079276773296, - "acc,exam_id__UNICAMP_2023": 0.6046511627906976, - "acc,exam_id__USP_2019": 0.55, - "acc,exam_id__UNICAMP_2021_1": 0.5, - "acc,exam_id__USP_2020": 0.5178571428571429, - "acc,exam_id__UNICAMP_2021_2": 0.5882352941176471, - "acc,exam_id__UNICAMP_2024": 0.5777777777777777, - "acc,exam_id__USP_2023": 0.7045454545454546, - "acc,exam_id__USP_2022": 0.6326530612244898, - "acc,exam_id__USP_2024": 0.7317073170731707, - "acc,exam_id__USP_2018": 0.5185185185185185, - "acc,exam_id__UNICAMP_2022": 0.5897435897435898, - "acc,exam_id__UNICAMP_2018": 0.5185185185185185, - "acc,exam_id__UNICAMP_2019": 0.58, - "acc,exam_id__USP_2021": 0.6346153846153846, - "acc,exam_id__UNICAMP_2020": 0.6363636363636364, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6878936319104269, - "acc,exam_id__2014": 0.6880733944954128, - "acc,exam_id__2016_2": 0.6504065040650406, - "acc,exam_id__2017": 0.6810344827586207, - "acc,exam_id__2011": 0.7008547008547008, - "acc,exam_id__2010": 0.7008547008547008, - "acc,exam_id__2022": 0.6541353383458647, - "acc,exam_id__2009": 0.6782608695652174, - "acc,exam_id__2015": 0.680672268907563, - "acc,exam_id__2023": 0.7037037037037037, - "acc,exam_id__2012": 0.7155172413793104, - "acc,exam_id__2016": 0.6942148760330579, - "acc,exam_id__2013": 0.7129629629629629 - }, - "faquad_nli": { - "f1_macro,all": 0.7782188011607123, - "acc,all": 0.8323076923076923, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8290936613488302, - "acc,all": 0.8321428571428572 - }, - "oab_exams": { - "acc,all": 0.475626423690205, - "acc,exam_id__2015-17": 0.5512820512820513, - "acc,exam_id__2010-01": 0.32941176470588235, - "acc,exam_id__2010-02": 0.51, - "acc,exam_id__2012-08": 0.5625, - "acc,exam_id__2017-22": 0.525, - "acc,exam_id__2014-13": 0.4, - "acc,exam_id__2012-09": 0.33766233766233766, - "acc,exam_id__2013-11": 0.5125, - "acc,exam_id__2011-03": 0.37373737373737376, - "acc,exam_id__2017-24": 0.55, - "acc,exam_id__2012-06a": 0.425, - "acc,exam_id__2011-05": 0.575, - "acc,exam_id__2014-15": 0.6025641025641025, - "acc,exam_id__2016-19": 0.4358974358974359, - "acc,exam_id__2012-06": 0.4375, - "acc,exam_id__2016-20a": 0.4, - "acc,exam_id__2016-21": 0.425, - "acc,exam_id__2018-25": 0.45, - "acc,exam_id__2014-14": 0.45, - "acc,exam_id__2013-10": 0.4625, - "acc,exam_id__2016-20": 0.5625, - "acc,exam_id__2017-23": 0.5, - "acc,exam_id__2012-07": 0.4, - "acc,exam_id__2015-18": 0.55, - "acc,exam_id__2015-16": 0.45, - "acc,exam_id__2013-12": 0.5625, - "acc,exam_id__2011-04": 0.525, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": 
"portuguese_hate_speech_binary", - "f1_macro,all": 0.7170439243829598, - "acc,all": 0.7403055229142186 - }, - "tweetsentbr": { - "f1_macro,all": 0.5306250602382254, - "acc,all": 0.7353233830845771, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9276957767969884, + "acc,all": 0.9276960784313726, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.8158043682731361, + "mse,all": 0.397863112745098, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5897079276773296, + "acc,exam_id__UNICAMP_2023": 0.6046511627906976, + "acc,exam_id__USP_2019": 0.55, + "acc,exam_id__UNICAMP_2021_1": 0.5, + "acc,exam_id__USP_2020": 0.5178571428571429, + "acc,exam_id__UNICAMP_2021_2": 0.5882352941176471, + "acc,exam_id__UNICAMP_2024": 0.5777777777777777, + "acc,exam_id__USP_2023": 0.7045454545454546, + "acc,exam_id__USP_2022": 0.6326530612244898, + "acc,exam_id__USP_2024": 0.7317073170731707, + "acc,exam_id__USP_2018": 0.5185185185185185, + "acc,exam_id__UNICAMP_2022": 0.5897435897435898, + "acc,exam_id__UNICAMP_2018": 0.5185185185185185, + "acc,exam_id__UNICAMP_2019": 0.58, + "acc,exam_id__USP_2021": 0.6346153846153846, + "acc,exam_id__UNICAMP_2020": 0.6363636363636364, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6878936319104269, + "acc,exam_id__2014": 0.6880733944954128, + "acc,exam_id__2016_2": 0.6504065040650406, + "acc,exam_id__2017": 0.6810344827586207, + "acc,exam_id__2011": 0.7008547008547008, + "acc,exam_id__2010": 0.7008547008547008, + "acc,exam_id__2022": 0.6541353383458647, + "acc,exam_id__2009": 0.6782608695652174, + "acc,exam_id__2015": 0.680672268907563, + "acc,exam_id__2023": 0.7037037037037037, + "acc,exam_id__2012": 0.7155172413793104, + "acc,exam_id__2016": 0.6942148760330579, + "acc,exam_id__2013": 0.7129629629629629 + }, + "faquad_nli": { + "f1_macro,all": 0.7782188011607123, + "acc,all": 0.8323076923076923, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8290936613488302, + "acc,all": 0.8321428571428572 + }, + "oab_exams": { + "acc,all": 0.475626423690205, + "acc,exam_id__2015-17": 0.5512820512820513, + "acc,exam_id__2010-01": 0.32941176470588235, + 
"acc,exam_id__2010-02": 0.51, + "acc,exam_id__2012-08": 0.5625, + "acc,exam_id__2017-22": 0.525, + "acc,exam_id__2014-13": 0.4, + "acc,exam_id__2012-09": 0.33766233766233766, + "acc,exam_id__2013-11": 0.5125, + "acc,exam_id__2011-03": 0.37373737373737376, + "acc,exam_id__2017-24": 0.55, + "acc,exam_id__2012-06a": 0.425, + "acc,exam_id__2011-05": 0.575, + "acc,exam_id__2014-15": 0.6025641025641025, + "acc,exam_id__2016-19": 0.4358974358974359, + "acc,exam_id__2012-06": 0.4375, + "acc,exam_id__2016-20a": 0.4, + "acc,exam_id__2016-21": 0.425, + "acc,exam_id__2018-25": 0.45, + "acc,exam_id__2014-14": 0.45, + "acc,exam_id__2013-10": 0.4625, + "acc,exam_id__2016-20": 0.5625, + "acc,exam_id__2017-23": 0.5, + "acc,exam_id__2012-07": 0.4, + "acc,exam_id__2015-18": 0.55, + "acc,exam_id__2015-16": 0.45, + "acc,exam_id__2013-12": 0.5625, + "acc,exam_id__2011-04": 0.525, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.7170439243829598, + "acc,all": 0.7403055229142186 + }, + "tweetsentbr": { + "f1_macro,all": 0.7075000803176339, + "acc,all": 0.7353233830845771, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "bbc16dbf94b8e8a99bb3e2ada6755faf9c2990dd", - "model_dtype": "torch.float16", - "model_memory_footprint": 22268366848, - "model_num_parameters": 10731524096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1502.7455065359477, - "min_seq_length": 1479, - "max_seq_length": 1569, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1711.7455065359477, - "min_seq_length": 1688, - "max_seq_length": 1778, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1756.9262865090404, - "min_seq_length": 1380, - "max_seq_length": 2557, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1657.039188243527, - "min_seq_length": 1391, - "max_seq_length": 2655, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1727.9876923076922, - "min_seq_length": 1672, - "max_seq_length": 1848, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "bbc16dbf94b8e8a99bb3e2ada6755faf9c2990dd", + "model_dtype": "torch.float16", + "model_memory_footprint": 22268366848, + "model_num_parameters": 10731524096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1518.3878571428572, - "min_seq_length": 1495, - "max_seq_length": 1769, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1402.764464692483, - "min_seq_length": 1136, - "max_seq_length": 1905, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1502.7455065359477, + "min_seq_length": 1479, + "max_seq_length": 1569, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1711.7455065359477, + "min_seq_length": 1688, + "max_seq_length": 1778, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1756.9262865090404, + "min_seq_length": 1380, + "max_seq_length": 2557, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1657.039188243527, + "min_seq_length": 1391, + 
"max_seq_length": 2655, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1727.9876923076922, + "min_seq_length": 1672, + "max_seq_length": 1848, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1518.3878571428572, + "min_seq_length": 1495, + "max_seq_length": 1769, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1402.764464692483, + "min_seq_length": 1136, + "max_seq_length": 1905, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2019.3360752056403, + "min_seq_length": 1984, + "max_seq_length": 2058, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1765.2492537313433, + "min_seq_length": 1744, + "max_seq_length": 1860, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2019.3360752056403, - "min_seq_length": 1984, - "max_seq_length": 2058, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=vicgalle/ConfigurableBeagle-11B,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1765.2492537313433, - "min_seq_length": 1744, - "max_seq_length": 1860, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=vicgalle/ConfigurableBeagle-11B,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - 
null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "2d67fba" + "git_hash": "2d67fba" } \ No newline at end of file diff --git a/vicgalle/ConfigurableBeagle-11B/results_2024-06-15T01-31-31.325184.json b/vicgalle/ConfigurableBeagle-11B/results_2024-06-15T01-31-31.325184.json index df3d169aacd0329b0763a04b1afc17f0bf0f7fc7..b6d514e31a79d9cf9dbd23ab2236dcbf866d48a5 100644 --- a/vicgalle/ConfigurableBeagle-11B/results_2024-06-15T01-31-31.325184.json +++ b/vicgalle/ConfigurableBeagle-11B/results_2024-06-15T01-31-31.325184.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.7057455083865348, - "all_grouped_npm": 0.5623793197619755, + "all_grouped_average": 0.725398288395358, + "all_grouped_npm": 0.5916245281084385, "all_grouped": { "enem_challenge": 0.6878936319104269, "bluex": 0.5897079276773296, @@ -45,7 +45,7 @@ "faquad_nli": 0.7782188011607123, "hatebr_offensive": 0.8290936613488302, "portuguese_hate_speech": 0.7170439243829598, - "tweetsentbr": 0.5306250602382254 + "tweetsentbr": 0.7075000803176339 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6878936319104269, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7782188011607123, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8290936613488302, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7170439243829598, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5306250602382254 + "harness|tweetsentbr|tweetsentbr|None|25": 0.7075000803176339 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6878936319104269, @@ -150,9 +150,9 @@ "main_score": 0.7170439243829598 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5306250602382254, + "f1_macro,all": 0.7075000803176339, "acc,all": 0.7353233830845771, - "main_score": 0.5306250602382254 + "main_score": 0.7075000803176339 } }, "config_tasks": { diff --git a/vicgalle/ConfigurableBeagle-11B/results_2024-06-15T01-31-31.737501.json b/vicgalle/ConfigurableBeagle-11B/results_2024-06-15T01-31-31.737501.json index 1f7a1bc578a539585cac0faff36e8e63e9ef5e19..2655609bd5a746066c5050ee14d4573834bf5a43 100644 --- a/vicgalle/ConfigurableBeagle-11B/results_2024-06-15T01-31-31.737501.json +++ b/vicgalle/ConfigurableBeagle-11B/results_2024-06-15T01-31-31.737501.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.7057455083865348, - "all_grouped_npm": 0.5623793197619755, + "all_grouped_average": 0.725398288395358, + "all_grouped_npm": 0.5916245281084385, "all_grouped": { "enem_challenge": 0.6878936319104269, "bluex": 0.5897079276773296, @@ -45,7 +45,7 @@ "faquad_nli": 0.7782188011607123, "hatebr_offensive": 0.8290936613488302, "portuguese_hate_speech": 0.7170439243829598, - "tweetsentbr": 0.5306250602382254 + "tweetsentbr": 0.7075000803176339 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6878936319104269, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7782188011607123, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8290936613488302, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7170439243829598, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5306250602382254 + "harness|tweetsentbr|tweetsentbr|None|25": 0.7075000803176339 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6878936319104269, @@ -150,9 +150,9 @@ "main_score": 0.7170439243829598 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5306250602382254, + "f1_macro,all": 0.7075000803176339, "acc,all": 
0.7353233830845771, - "main_score": 0.5306250602382254 + "main_score": 0.7075000803176339 } }, "config_tasks": { diff --git a/vicgalle/ConfigurableBeagle-11B/results_2024-06-16T01-31-29.633353.json b/vicgalle/ConfigurableBeagle-11B/results_2024-06-16T01-31-29.633353.json index 6d20ca259f871de6268929a286afdb62e74f9314..844197d97f4fa699bb85d5c08c4d77fe55a291c9 100644 --- a/vicgalle/ConfigurableBeagle-11B/results_2024-06-16T01-31-29.633353.json +++ b/vicgalle/ConfigurableBeagle-11B/results_2024-06-16T01-31-29.633353.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.7057455083865348, - "all_grouped_npm": 0.5623793197619755, + "all_grouped_average": 0.725398288395358, + "all_grouped_npm": 0.5916245281084385, "all_grouped": { "enem_challenge": 0.6878936319104269, "bluex": 0.5897079276773296, @@ -45,7 +45,7 @@ "faquad_nli": 0.7782188011607123, "hatebr_offensive": 0.8290936613488302, "portuguese_hate_speech": 0.7170439243829598, - "tweetsentbr": 0.5306250602382254 + "tweetsentbr": 0.7075000803176339 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6878936319104269, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7782188011607123, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8290936613488302, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7170439243829598, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5306250602382254 + "harness|tweetsentbr|tweetsentbr|None|25": 0.7075000803176339 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6878936319104269, @@ -150,9 +150,9 @@ "main_score": 0.7170439243829598 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5306250602382254, + "f1_macro,all": 0.7075000803176339, "acc,all": 0.7353233830845771, - "main_score": 0.5306250602382254 + "main_score": 0.7075000803176339 } }, "config_tasks": { diff --git a/vicgalle/ConfigurableBeagle-11B/results_2024-06-16T01-31-29.648962.json b/vicgalle/ConfigurableBeagle-11B/results_2024-06-16T01-31-29.648962.json index 4f562ab6f90c586b58e8a531e9fec351e378662e..75d8753213ecbd9c475bb991a356cbfef0ab457f 100644 --- a/vicgalle/ConfigurableBeagle-11B/results_2024-06-16T01-31-29.648962.json +++ b/vicgalle/ConfigurableBeagle-11B/results_2024-06-16T01-31-29.648962.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.7057455083865348, - "all_grouped_npm": 0.5623793197619755, + "all_grouped_average": 0.725398288395358, + "all_grouped_npm": 0.5916245281084385, "all_grouped": { "enem_challenge": 0.6878936319104269, "bluex": 0.5897079276773296, @@ -45,7 +45,7 @@ "faquad_nli": 0.7782188011607123, "hatebr_offensive": 0.8290936613488302, "portuguese_hate_speech": 0.7170439243829598, - "tweetsentbr": 0.5306250602382254 + "tweetsentbr": 0.7075000803176339 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6878936319104269, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7782188011607123, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8290936613488302, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7170439243829598, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5306250602382254 + "harness|tweetsentbr|tweetsentbr|None|25": 0.7075000803176339 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6878936319104269, @@ -150,9 +150,9 @@ "main_score": 0.7170439243829598 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5306250602382254, + "f1_macro,all": 0.7075000803176339, "acc,all": 0.7353233830845771, - 
"main_score": 0.5306250602382254 + "main_score": 0.7075000803176339 } }, "config_tasks": { diff --git a/vicgalle/ConfigurableSOLAR-10.7B/raw_2024-08-26T05-22-16.175643/results.json b/vicgalle/ConfigurableSOLAR-10.7B/raw_2024-08-26T05-22-16.175643/results.json index c92032a3fa6e6149b3a55855e7a6801a5557c979..937d5aaaf890d593dfbc3a0443d1ab4ebd7d37d5 100644 --- a/vicgalle/ConfigurableSOLAR-10.7B/raw_2024-08-26T05-22-16.175643/results.json +++ b/vicgalle/ConfigurableSOLAR-10.7B/raw_2024-08-26T05-22-16.175643/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9165101276141576, - "acc,all": 0.9166666666666666, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.8271838931379497, - "mse,all": 0.3858708431372549, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5785813630041725, - "acc,exam_id__UNICAMP_2022": 0.5897435897435898, - "acc,exam_id__USP_2021": 0.5961538461538461, - "acc,exam_id__UNICAMP_2023": 0.627906976744186, - "acc,exam_id__UNICAMP_2020": 0.6, - "acc,exam_id__UNICAMP_2021_1": 0.6304347826086957, - "acc,exam_id__USP_2019": 0.5, - "acc,exam_id__USP_2018": 0.5, - "acc,exam_id__UNICAMP_2018": 0.5555555555555556, - "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, - "acc,exam_id__UNICAMP_2024": 0.6222222222222222, - "acc,exam_id__USP_2024": 0.7317073170731707, - "acc,exam_id__USP_2023": 0.6818181818181818, - "acc,exam_id__USP_2022": 0.5306122448979592, - "acc,exam_id__USP_2020": 0.4642857142857143, - "acc,exam_id__UNICAMP_2019": 0.54, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6822953114065781, - "acc,exam_id__2011": 0.7435897435897436, - "acc,exam_id__2009": 0.6434782608695652, - "acc,exam_id__2012": 0.6896551724137931, - "acc,exam_id__2022": 0.6165413533834586, - "acc,exam_id__2010": 0.7264957264957265, - "acc,exam_id__2023": 0.674074074074074, - "acc,exam_id__2016_2": 0.6910569105691057, - "acc,exam_id__2014": 0.6330275229357798, - "acc,exam_id__2016": 0.6446280991735537, - "acc,exam_id__2015": 0.7478991596638656, - "acc,exam_id__2017": 0.7068965517241379, - "acc,exam_id__2013": 0.6759259259259259 - }, - "faquad_nli": { - "f1_macro,all": 0.7603967695452565, - "acc,all": 0.8138461538461539, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.749459230696887, - "acc,all": 0.7621428571428571 - }, - "oab_exams": { - "acc,all": 0.4560364464692483, - "acc,exam_id__2016-20a": 0.4875, - "acc,exam_id__2014-15": 0.5256410256410257, - "acc,exam_id__2010-01": 0.3058823529411765, - "acc,exam_id__2016-21": 0.425, - "acc,exam_id__2017-22": 0.45, - "acc,exam_id__2011-05": 0.525, - "acc,exam_id__2017-24": 0.4625, - "acc,exam_id__2017-23": 0.4625, - "acc,exam_id__2012-08": 0.5125, - "acc,exam_id__2012-07": 0.425, - "acc,exam_id__2015-18": 0.4875, - "acc,exam_id__2013-10": 0.45, - "acc,exam_id__2016-20": 0.5125, - "acc,exam_id__2013-12": 0.525, - "acc,exam_id__2014-14": 0.525, - "acc,exam_id__2015-16": 0.4375, - "acc,exam_id__2018-25": 0.4125, - "acc,exam_id__2012-06": 0.3875, - "acc,exam_id__2013-11": 0.5375, - "acc,exam_id__2010-02": 0.42, - "acc,exam_id__2012-09": 0.3246753246753247, - "acc,exam_id__2016-19": 0.47435897435897434, - "acc,exam_id__2014-13": 0.4125, - "acc,exam_id__2011-04": 0.4375, - "acc,exam_id__2015-17": 0.5897435897435898, - "acc,exam_id__2012-06a": 0.4125, - "acc,exam_id__2011-03": 0.41414141414141414, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 
0.693451792445166, - "acc,all": 0.7238542890716804 - }, - "tweetsentbr": { - "f1_macro,all": 0.5393919169959711, - "acc,all": 0.7348258706467662, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9165101276141576, + "acc,all": 0.9166666666666666, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.8271838931379497, + "mse,all": 0.3858708431372549, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5785813630041725, + "acc,exam_id__UNICAMP_2022": 0.5897435897435898, + "acc,exam_id__USP_2021": 0.5961538461538461, + "acc,exam_id__UNICAMP_2023": 0.627906976744186, + "acc,exam_id__UNICAMP_2020": 0.6, + "acc,exam_id__UNICAMP_2021_1": 0.6304347826086957, + "acc,exam_id__USP_2019": 0.5, + "acc,exam_id__USP_2018": 0.5, + "acc,exam_id__UNICAMP_2018": 0.5555555555555556, + "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921, + "acc,exam_id__UNICAMP_2024": 0.6222222222222222, + "acc,exam_id__USP_2024": 0.7317073170731707, + "acc,exam_id__USP_2023": 0.6818181818181818, + "acc,exam_id__USP_2022": 0.5306122448979592, + "acc,exam_id__USP_2020": 0.4642857142857143, + "acc,exam_id__UNICAMP_2019": 0.54, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6822953114065781, + "acc,exam_id__2011": 0.7435897435897436, + "acc,exam_id__2009": 0.6434782608695652, + "acc,exam_id__2012": 0.6896551724137931, + "acc,exam_id__2022": 0.6165413533834586, + "acc,exam_id__2010": 0.7264957264957265, + "acc,exam_id__2023": 0.674074074074074, + "acc,exam_id__2016_2": 0.6910569105691057, + "acc,exam_id__2014": 0.6330275229357798, + "acc,exam_id__2016": 0.6446280991735537, + "acc,exam_id__2015": 0.7478991596638656, + "acc,exam_id__2017": 0.7068965517241379, + "acc,exam_id__2013": 0.6759259259259259 + }, + "faquad_nli": { + "f1_macro,all": 0.7603967695452565, + "acc,all": 0.8138461538461539, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.749459230696887, + "acc,all": 0.7621428571428571 + }, + "oab_exams": { + "acc,all": 0.4560364464692483, + "acc,exam_id__2016-20a": 0.4875, + "acc,exam_id__2014-15": 0.5256410256410257, + "acc,exam_id__2010-01": 0.3058823529411765, + "acc,exam_id__2016-21": 0.425, + 
"acc,exam_id__2017-22": 0.45, + "acc,exam_id__2011-05": 0.525, + "acc,exam_id__2017-24": 0.4625, + "acc,exam_id__2017-23": 0.4625, + "acc,exam_id__2012-08": 0.5125, + "acc,exam_id__2012-07": 0.425, + "acc,exam_id__2015-18": 0.4875, + "acc,exam_id__2013-10": 0.45, + "acc,exam_id__2016-20": 0.5125, + "acc,exam_id__2013-12": 0.525, + "acc,exam_id__2014-14": 0.525, + "acc,exam_id__2015-16": 0.4375, + "acc,exam_id__2018-25": 0.4125, + "acc,exam_id__2012-06": 0.3875, + "acc,exam_id__2013-11": 0.5375, + "acc,exam_id__2010-02": 0.42, + "acc,exam_id__2012-09": 0.3246753246753247, + "acc,exam_id__2016-19": 0.47435897435897434, + "acc,exam_id__2014-13": 0.4125, + "acc,exam_id__2011-04": 0.4375, + "acc,exam_id__2015-17": 0.5897435897435898, + "acc,exam_id__2012-06a": 0.4125, + "acc,exam_id__2011-03": 0.41414141414141414, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.693451792445166, + "acc,all": 0.7238542890716804 + }, + "tweetsentbr": { + "f1_macro,all": 0.7191892226612948, + "acc,all": 0.7348258706467662, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 4, - "non_truncated": 14146, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 4, - "has_chat_template": true, - "chat_type": "system_user_assistant", - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "c5c844a7447d952d1a959b2542fb3aeec5e85133", - "model_dtype": "torch.float16", - "model_memory_footprint": 21463060736, - "model_num_parameters": 10731524096, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1502.7455065359477, - "min_seq_length": 1479, - "max_seq_length": 1569, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1711.7455065359477, - "min_seq_length": 1688, - "max_seq_length": 1778, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 2, - "non_truncated": 717, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 2, - "mean_seq_length": 1756.9262865090404, - "min_seq_length": 1380, - "max_seq_length": 2557, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9972183588317107 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 2, - "non_truncated": 1427, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 2, - 
"mean_seq_length": 1657.039188243527, - "min_seq_length": 1391, - "max_seq_length": 2655, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.998600419874038 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1727.9876923076922, - "min_seq_length": 1672, - "max_seq_length": 1848, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 4, + "non_truncated": 14146, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 4, + "has_chat_template": true, + "chat_type": "system_user_assistant", + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "c5c844a7447d952d1a959b2542fb3aeec5e85133", + "model_dtype": "torch.float16", + "model_memory_footprint": 21463060736, + "model_num_parameters": 10731524096, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1518.3878571428572, - "min_seq_length": 1495, - "max_seq_length": 1769, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1402.764464692483, - "min_seq_length": 1136, - "max_seq_length": 1905, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1502.7455065359477, + "min_seq_length": 1479, + "max_seq_length": 1569, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1711.7455065359477, + "min_seq_length": 1688, + "max_seq_length": 1778, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 2, + "non_truncated": 717, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 2, + "mean_seq_length": 1756.9262865090404, + "min_seq_length": 1380, + "max_seq_length": 2557, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9972183588317107 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 2, + "non_truncated": 1427, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 2, + "mean_seq_length": 1657.039188243527, + "min_seq_length": 1391, + 
"max_seq_length": 2655, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.998600419874038 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1727.9876923076922, + "min_seq_length": 1672, + "max_seq_length": 1848, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1518.3878571428572, + "min_seq_length": 1495, + "max_seq_length": 1769, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1402.764464692483, + "min_seq_length": 1136, + "max_seq_length": 1905, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2019.3360752056403, + "min_seq_length": 1984, + "max_seq_length": 2058, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1765.2492537313433, + "min_seq_length": 1744, + "max_seq_length": 1860, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2019.3360752056403, - "min_seq_length": 1984, - "max_seq_length": 2058, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=vicgalle/ConfigurableSOLAR-10.7B,dtype=float16,device=cuda:0,revision=c5c844a7447d952d1a959b2542fb3aeec5e85133,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1765.2492537313433, - "min_seq_length": 1744, - "max_seq_length": 1860, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=vicgalle/ConfigurableSOLAR-10.7B,dtype=float16,device=cuda:0,revision=c5c844a7447d952d1a959b2542fb3aeec5e85133,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - 
"limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/vicgalle/ConfigurableSOLAR-10.7B/results_2024-08-26T05-22-16.175643.json b/vicgalle/ConfigurableSOLAR-10.7B/results_2024-08-26T05-22-16.175643.json index 9d7249ec84e3677a3d614b52e19b196817218474..ec5428cf597f0951d2468985781fe6ab9db93b96 100644 --- a/vicgalle/ConfigurableSOLAR-10.7B/results_2024-08-26T05-22-16.175643.json +++ b/vicgalle/ConfigurableSOLAR-10.7B/results_2024-08-26T05-22-16.175643.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6892563168128207, - "all_grouped_npm": 0.5309645491629713, + "all_grouped_average": 0.7092337952200789, + "all_grouped_npm": 0.5606929396499627, "all_grouped": { "enem_challenge": 0.6822953114065781, "bluex": 0.5785813630041725, @@ -45,7 +45,7 @@ "faquad_nli": 0.7603967695452565, "hatebr_offensive": 0.749459230696887, "portuguese_hate_speech": 0.693451792445166, - "tweetsentbr": 0.5393919169959711 + "tweetsentbr": 0.7191892226612948 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6822953114065781, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7603967695452565, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.749459230696887, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.693451792445166, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5393919169959711 + "harness|tweetsentbr|tweetsentbr|None|25": 0.7191892226612948 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6822953114065781, @@ -150,9 +150,9 @@ "main_score": 0.693451792445166 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5393919169959711, + "f1_macro,all": 0.7191892226612948, "acc,all": 0.7348258706467662, - "main_score": 0.5393919169959711 + "main_score": 0.7191892226612948 } }, "config_tasks": { diff --git a/xverse/XVERSE-13B/raw_2024-04-02T16-25-23.716110/results.json b/xverse/XVERSE-13B/raw_2024-04-02T16-25-23.716110/results.json index 5a057498ba286e950d955f8f20b47350e521e2bf..8d5286be0f91eae6d6001e4749a81192be2c0093 100644 --- a/xverse/XVERSE-13B/raw_2024-04-02T16-25-23.716110/results.json +++ b/xverse/XVERSE-13B/raw_2024-04-02T16-25-23.716110/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.853651791779636, - "acc,all": 0.8537581699346405, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.5612184700878184, - "mse,all": 0.8298447712418301, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.42698191933240615, - "acc,exam_id__USP_2021": 0.36538461538461536, - "acc,exam_id__USP_2018": 0.35185185185185186, - "acc,exam_id__UNICAMP_2023": 0.3488372093023256, - "acc,exam_id__USP_2024": 0.5609756097560976, - "acc,exam_id__USP_2022": 0.40816326530612246, - "acc,exam_id__UNICAMP_2019": 0.46, - "acc,exam_id__USP_2020": 0.42857142857142855, - "acc,exam_id__UNICAMP_2021_1": 0.45652173913043476, - "acc,exam_id__USP_2019": 0.325, - "acc,exam_id__UNICAMP_2022": 0.41025641025641024, - "acc,exam_id__UNICAMP_2024": 0.5333333333333333, - "acc,exam_id__UNICAMP_2021_2": 0.4117647058823529, - "acc,exam_id__UNICAMP_2018": 0.35185185185185186, - "acc,exam_id__USP_2023": 0.5681818181818182, - "acc,exam_id__UNICAMP_2020": 0.45454545454545453, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.42617214835549333, - "acc,exam_id__2013": 0.3888888888888889, - 
"acc,exam_id__2012": 0.43103448275862066, - "acc,exam_id__2015": 0.5210084033613446, - "acc,exam_id__2016": 0.4214876033057851, - "acc,exam_id__2009": 0.391304347826087, - "acc,exam_id__2023": 0.4, - "acc,exam_id__2016_2": 0.3902439024390244, - "acc,exam_id__2010": 0.42735042735042733, - "acc,exam_id__2014": 0.46788990825688076, - "acc,exam_id__2022": 0.3157894736842105, - "acc,exam_id__2011": 0.5299145299145299, - "acc,exam_id__2017": 0.4482758620689655 - }, - "faquad_nli": { - "f1_macro,all": 0.495072463768116, - "acc,all": 0.7938461538461539, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7506150372392411, - "acc,all": 0.7621428571428571 - }, - "oab_exams": { - "acc,all": 0.29658314350797266, - "acc,exam_id__2011-03": 0.30303030303030304, - "acc,exam_id__2011-04": 0.2875, - "acc,exam_id__2011-05": 0.2625, - "acc,exam_id__2016-19": 0.28205128205128205, - "acc,exam_id__2017-23": 0.275, - "acc,exam_id__2018-25": 0.3125, - "acc,exam_id__2012-09": 0.3246753246753247, - "acc,exam_id__2017-24": 0.3125, - "acc,exam_id__2014-14": 0.4, - "acc,exam_id__2015-17": 0.2948717948717949, - "acc,exam_id__2012-07": 0.3625, - "acc,exam_id__2016-20": 0.2, - "acc,exam_id__2013-11": 0.3875, - "acc,exam_id__2016-21": 0.3375, - "acc,exam_id__2012-06a": 0.25, - "acc,exam_id__2015-18": 0.225, - "acc,exam_id__2012-08": 0.3375, - "acc,exam_id__2013-12": 0.3, - "acc,exam_id__2012-06": 0.2625, - "acc,exam_id__2015-16": 0.2625, - "acc,exam_id__2013-10": 0.325, - "acc,exam_id__2014-13": 0.3125, - "acc,exam_id__2010-02": 0.26, - "acc,exam_id__2014-15": 0.358974358974359, - "acc,exam_id__2016-20a": 0.2625, - "acc,exam_id__2010-01": 0.23529411764705882, - "acc,exam_id__2017-22": 0.2875, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.4367778297691163, - "acc,all": 0.44653349001175086 - }, - "tweetsentbr": { - "f1_macro,all": 0.48636217182153496, - "acc,all": 0.7024875621890547, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.853651791779636, + "acc,all": 0.8537581699346405, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.5612184700878184, + "mse,all": 0.8298447712418301, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.42698191933240615, + "acc,exam_id__USP_2021": 0.36538461538461536, + "acc,exam_id__USP_2018": 0.35185185185185186, + "acc,exam_id__UNICAMP_2023": 0.3488372093023256, + "acc,exam_id__USP_2024": 0.5609756097560976, + "acc,exam_id__USP_2022": 0.40816326530612246, + "acc,exam_id__UNICAMP_2019": 0.46, + "acc,exam_id__USP_2020": 0.42857142857142855, + "acc,exam_id__UNICAMP_2021_1": 0.45652173913043476, + "acc,exam_id__USP_2019": 0.325, + "acc,exam_id__UNICAMP_2022": 0.41025641025641024, + "acc,exam_id__UNICAMP_2024": 0.5333333333333333, + "acc,exam_id__UNICAMP_2021_2": 0.4117647058823529, + "acc,exam_id__UNICAMP_2018": 0.35185185185185186, + "acc,exam_id__USP_2023": 0.5681818181818182, + "acc,exam_id__UNICAMP_2020": 0.45454545454545453, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.42617214835549333, + "acc,exam_id__2013": 0.3888888888888889, + "acc,exam_id__2012": 0.43103448275862066, + "acc,exam_id__2015": 0.5210084033613446, + "acc,exam_id__2016": 0.4214876033057851, + "acc,exam_id__2009": 0.391304347826087, + "acc,exam_id__2023": 0.4, + "acc,exam_id__2016_2": 0.3902439024390244, + "acc,exam_id__2010": 0.42735042735042733, + "acc,exam_id__2014": 0.46788990825688076, + "acc,exam_id__2022": 0.3157894736842105, + "acc,exam_id__2011": 0.5299145299145299, + "acc,exam_id__2017": 0.4482758620689655 + }, + "faquad_nli": { + "f1_macro,all": 0.495072463768116, + "acc,all": 0.7938461538461539, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7506150372392411, + "acc,all": 0.7621428571428571 + }, + "oab_exams": { + "acc,all": 0.29658314350797266, + "acc,exam_id__2011-03": 0.30303030303030304, + "acc,exam_id__2011-04": 0.2875, + "acc,exam_id__2011-05": 0.2625, + "acc,exam_id__2016-19": 0.28205128205128205, + "acc,exam_id__2017-23": 0.275, + "acc,exam_id__2018-25": 0.3125, + "acc,exam_id__2012-09": 0.3246753246753247, + "acc,exam_id__2017-24": 0.3125, + "acc,exam_id__2014-14": 0.4, + "acc,exam_id__2015-17": 0.2948717948717949, + "acc,exam_id__2012-07": 0.3625, + "acc,exam_id__2016-20": 0.2, + "acc,exam_id__2013-11": 0.3875, + "acc,exam_id__2016-21": 0.3375, + "acc,exam_id__2012-06a": 0.25, + "acc,exam_id__2015-18": 0.225, + "acc,exam_id__2012-08": 0.3375, + "acc,exam_id__2013-12": 0.3, + "acc,exam_id__2012-06": 0.2625, + "acc,exam_id__2015-16": 0.2625, + "acc,exam_id__2013-10": 0.325, + 
"acc,exam_id__2014-13": 0.3125, + "acc,exam_id__2010-02": 0.26, + "acc,exam_id__2014-15": 0.358974358974359, + "acc,exam_id__2016-20a": 0.2625, + "acc,exam_id__2010-01": 0.23529411764705882, + "acc,exam_id__2017-22": 0.2875, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.4367778297691163, + "acc,all": 0.44653349001175086 + }, + "tweetsentbr": { + "f1_macro,all": 0.6484828957620467, + "acc,all": 0.7024875621890547, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 167, - "non_truncated": 13983, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 203, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": 
"11ac840dda17af81046614229fdd0c658afff747", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 27770859520, - "model_num_parameters": 13717652480, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 2, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1816.750816993464, - "min_seq_length": 1788, - "max_seq_length": 1911, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1960.750816993464, - "min_seq_length": 1932, - "max_seq_length": 2055, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "bluex": { - "sample_size": 719, - "truncated": 91, - "non_truncated": 628, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 122, - "mean_seq_length": 2223.4937413073712, - "min_seq_length": 1720, - "max_seq_length": 3265, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.8303198887343535 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 75, - "non_truncated": 1354, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 80, - "mean_seq_length": 2123.4933519944016, - "min_seq_length": 1758, - "max_seq_length": 3702, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9440167949615117 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 2124.6569230769232, - "min_seq_length": 2050, - "max_seq_length": 2290, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1624.8157142857142, - "min_seq_length": 1596, - "max_seq_length": 1960, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 167, + "non_truncated": 13983, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 203, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "11ac840dda17af81046614229fdd0c658afff747", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 27770859520, + "model_num_parameters": 13717652480, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + 
"model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 2, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 1, - "non_truncated": 2194, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 1, - "mean_seq_length": 1856.5257403189066, - "min_seq_length": 1486, - "max_seq_length": 2576, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9995444191343963 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1816.750816993464, + "min_seq_length": 1788, + "max_seq_length": 1911, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1960.750816993464, + "min_seq_length": 1932, + "max_seq_length": 2055, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 91, + "non_truncated": 628, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 122, + "mean_seq_length": 2223.4937413073712, + "min_seq_length": 1720, + "max_seq_length": 3265, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.8303198887343535 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 75, + "non_truncated": 1354, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 80, + "mean_seq_length": 2123.4933519944016, + "min_seq_length": 1758, + "max_seq_length": 3702, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9440167949615117 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 2124.6569230769232, + "min_seq_length": 2050, + "max_seq_length": 2290, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1624.8157142857142, + "min_seq_length": 1596, + "max_seq_length": 1960, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 1, + "non_truncated": 2194, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 1, + "mean_seq_length": 1856.5257403189066, + "min_seq_length": 1486, + "max_seq_length": 2576, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9995444191343963 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 2300.578143360752, + "min_seq_length": 2254, + "max_seq_length": 2343, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 
25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 2088.7278606965174, + "min_seq_length": 2061, + "max_seq_length": 2157, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 2300.578143360752, - "min_seq_length": 2254, - "max_seq_length": 2343, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=xverse/XVERSE-13B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 2088.7278606965174, - "min_seq_length": 2061, - "max_seq_length": 2157, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=xverse/XVERSE-13B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": null + "git_hash": null } \ No newline at end of file diff --git a/xverse/XVERSE-13B/results_2024-04-02T16-25-23.716110.json b/xverse/XVERSE-13B/results_2024-04-02T16-25-23.716110.json index 04c88422dfff83e743bc928dba050cb1e6425f54..9e27e28ccda6c3f6c78354b97ac45a895e69c746 100644 --- a/xverse/XVERSE-13B/results_2024-04-02T16-25-23.716110.json +++ b/xverse/XVERSE-13B/results_2024-04-02T16-25-23.716110.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.5259372195179259, - "all_grouped_npm": 0.28907130498283157, + "all_grouped_average": 0.543950633289094, + "all_grouped_npm": 0.31587698023754585, "all_grouped": { "enem_challenge": 0.42617214835549333, "bluex": 0.42698191933240615, @@ -45,7 +45,7 @@ "faquad_nli": 0.495072463768116, "hatebr_offensive": 0.7506150372392411, "portuguese_hate_speech": 0.4367778297691163, - "tweetsentbr": 0.48636217182153496 + "tweetsentbr": 0.6484828957620467 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.42617214835549333, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.495072463768116, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7506150372392411, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.4367778297691163, - "harness|tweetsentbr|tweetsentbr|None|25": 0.48636217182153496 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6484828957620467 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.42617214835549333, @@ -150,9 +150,9 @@ "main_score": 0.4367778297691163 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 
0.48636217182153496, + "f1_macro,all": 0.6484828957620467, "acc,all": 0.7024875621890547, - "main_score": 0.48636217182153496 + "main_score": 0.6484828957620467 } }, "config_tasks": { diff --git a/xverse/XVERSE-65B/raw_2024-02-19T06-41-25.853474/results.json b/xverse/XVERSE-65B/raw_2024-02-19T06-41-25.853474/results.json index 702a8276dcc50572f9d3e4ab4cfb95e66e0fe1f8..c9185a43eae9d66f69f7659f410bfbfe2064789a 100644 --- a/xverse/XVERSE-65B/raw_2024-02-19T06-41-25.853474/results.json +++ b/xverse/XVERSE-65B/raw_2024-02-19T06-41-25.853474/results.json @@ -1,1324 +1,1324 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.8703715365817846, - "acc,all": 0.8705065359477124, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7327314288775599, - "mse,all": 0.643451797385621, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.37830319888734354, - "acc,exam_id__UNICAMP_2020": 0.34545454545454546, - "acc,exam_id__USP_2021": 0.40384615384615385, - "acc,exam_id__UNICAMP_2023": 0.3488372093023256, - "acc,exam_id__USP_2020": 0.32142857142857145, - "acc,exam_id__UNICAMP_2024": 0.4444444444444444, - "acc,exam_id__USP_2024": 0.34146341463414637, - "acc,exam_id__UNICAMP_2019": 0.42, - "acc,exam_id__UNICAMP_2022": 0.5384615384615384, - "acc,exam_id__USP_2018": 0.2962962962962963, - "acc,exam_id__UNICAMP_2021_1": 0.6086956521739131, - "acc,exam_id__USP_2022": 0.3469387755102041, - "acc,exam_id__UNICAMP_2018": 0.37037037037037035, - "acc,exam_id__USP_2023": 0.38636363636363635, - "acc,exam_id__USP_2019": 0.225, - "acc,exam_id__UNICAMP_2021_2": 0.3137254901960784, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.3449965010496851, - "acc,exam_id__2015": 0.4369747899159664, - "acc,exam_id__2016_2": 0.22764227642276422, - "acc,exam_id__2012": 0.3275862068965517, - "acc,exam_id__2022": 0.3458646616541353, - "acc,exam_id__2013": 0.3425925925925926, - "acc,exam_id__2014": 0.3119266055045872, - "acc,exam_id__2016": 0.4049586776859504, - "acc,exam_id__2009": 0.2956521739130435, - "acc,exam_id__2023": 0.35555555555555557, - "acc,exam_id__2010": 0.36752136752136755, - "acc,exam_id__2017": 0.3793103448275862, - "acc,exam_id__2011": 0.3418803418803419 - }, - "faquad_nli": { - "f1_macro,all": 0.4891375750750751, - "acc,all": 0.7938461538461539, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7725655170589092, - "acc,all": 0.7807142857142857 - }, - "oab_exams": { - "acc,all": 0.2911161731207289, - "acc,exam_id__2017-22": 0.3375, - "acc,exam_id__2015-18": 0.275, - "acc,exam_id__2016-20": 0.225, - "acc,exam_id__2013-12": 0.2625, - "acc,exam_id__2012-07": 0.35, - "acc,exam_id__2018-25": 0.275, - "acc,exam_id__2014-13": 0.325, - "acc,exam_id__2012-06a": 0.25, - "acc,exam_id__2016-19": 0.2564102564102564, - "acc,exam_id__2015-16": 0.275, - "acc,exam_id__2011-03": 0.2828282828282828, - "acc,exam_id__2012-09": 0.3116883116883117, - "acc,exam_id__2012-08": 0.3625, - "acc,exam_id__2017-23": 0.275, - "acc,exam_id__2014-15": 0.38461538461538464, - "acc,exam_id__2016-21": 0.3375, - "acc,exam_id__2011-05": 0.2625, - "acc,exam_id__2010-01": 0.24705882352941178, - "acc,exam_id__2010-02": 0.27, - "acc,exam_id__2012-06": 0.225, - "acc,exam_id__2014-14": 0.3375, - "acc,exam_id__2013-10": 0.325, - "acc,exam_id__2011-04": 0.2875, - "acc,exam_id__2013-11": 0.35, - "acc,exam_id__2015-17": 0.2564102564102564, - "acc,exam_id__2016-20a": 0.225, - "acc,exam_id__2017-24": 0.3, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { 
- "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.48470765594061804, - "acc,all": 0.4864864864864865 - }, - "tweetsentbr": { - "f1_macro,all": 0.46996729873073895, - "acc,all": 0.7164179104477612, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.8703715365817846, + "acc,all": 0.8705065359477124, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7327314288775599, + "mse,all": 0.643451797385621, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.37830319888734354, + "acc,exam_id__UNICAMP_2020": 0.34545454545454546, + "acc,exam_id__USP_2021": 0.40384615384615385, + "acc,exam_id__UNICAMP_2023": 0.3488372093023256, + "acc,exam_id__USP_2020": 0.32142857142857145, + "acc,exam_id__UNICAMP_2024": 0.4444444444444444, + "acc,exam_id__USP_2024": 0.34146341463414637, + "acc,exam_id__UNICAMP_2019": 0.42, + "acc,exam_id__UNICAMP_2022": 0.5384615384615384, + "acc,exam_id__USP_2018": 0.2962962962962963, + "acc,exam_id__UNICAMP_2021_1": 0.6086956521739131, + "acc,exam_id__USP_2022": 0.3469387755102041, + "acc,exam_id__UNICAMP_2018": 0.37037037037037035, + "acc,exam_id__USP_2023": 0.38636363636363635, + "acc,exam_id__USP_2019": 0.225, + "acc,exam_id__UNICAMP_2021_2": 0.3137254901960784, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.3449965010496851, + "acc,exam_id__2015": 0.4369747899159664, + "acc,exam_id__2016_2": 0.22764227642276422, + "acc,exam_id__2012": 0.3275862068965517, + "acc,exam_id__2022": 0.3458646616541353, + "acc,exam_id__2013": 0.3425925925925926, + "acc,exam_id__2014": 0.3119266055045872, + "acc,exam_id__2016": 0.4049586776859504, + "acc,exam_id__2009": 0.2956521739130435, + "acc,exam_id__2023": 0.35555555555555557, + "acc,exam_id__2010": 0.36752136752136755, + "acc,exam_id__2017": 0.3793103448275862, + "acc,exam_id__2011": 0.3418803418803419 + }, + "faquad_nli": { + "f1_macro,all": 0.4891375750750751, + "acc,all": 0.7938461538461539, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7725655170589092, + "acc,all": 0.7807142857142857 + }, + "oab_exams": { + "acc,all": 0.2911161731207289, + "acc,exam_id__2017-22": 0.3375, + "acc,exam_id__2015-18": 
0.275, + "acc,exam_id__2016-20": 0.225, + "acc,exam_id__2013-12": 0.2625, + "acc,exam_id__2012-07": 0.35, + "acc,exam_id__2018-25": 0.275, + "acc,exam_id__2014-13": 0.325, + "acc,exam_id__2012-06a": 0.25, + "acc,exam_id__2016-19": 0.2564102564102564, + "acc,exam_id__2015-16": 0.275, + "acc,exam_id__2011-03": 0.2828282828282828, + "acc,exam_id__2012-09": 0.3116883116883117, + "acc,exam_id__2012-08": 0.3625, + "acc,exam_id__2017-23": 0.275, + "acc,exam_id__2014-15": 0.38461538461538464, + "acc,exam_id__2016-21": 0.3375, + "acc,exam_id__2011-05": 0.2625, + "acc,exam_id__2010-01": 0.24705882352941178, + "acc,exam_id__2010-02": 0.27, + "acc,exam_id__2012-06": 0.225, + "acc,exam_id__2014-14": 0.3375, + "acc,exam_id__2013-10": 0.325, + "acc,exam_id__2011-04": 0.2875, + "acc,exam_id__2013-11": 0.35, + "acc,exam_id__2015-17": 0.2564102564102564, + "acc,exam_id__2016-20a": 0.225, + "acc,exam_id__2017-24": 0.3, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.48470765594061804, + "acc,all": 0.4864864864864865 + }, + "tweetsentbr": { + "f1_macro,all": 0.6266230649743186, + "acc,all": 0.7164179104477612, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia-temp/tweetsentbr", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "862006098672459776", - "861612241703063552", - "861833257087848448", - "861283345476571138", - "861283000335695873", - "862139461274152962", - "862139468702265344", - "862006107702734848", - "862004354458537984", - "861833322925883392", - "861603063190171648", - "862139462716989440", - "862005877355810818", - "861751885862244353", - "862045180261695489", - "862004252499226630", - "862023970828292097", - "862041752127107074", - "862034961863503872", - "861293756548608001", - "861993527575695360", - "862003099355021315", - "862002404086206467", - "861282989602463744", - "862139454399668229", - "862139463769743361", - "862054906689138688", - "862139446535360513", - "861997363744911361", - "862057988898648065", - "861329080083521536", - "861286289034838016", - "861833050526806017", - "861300658565255169", - "861989003821813760", - "861682750398631938", - "861283275716907008", - "861283402523267072", - "861873108147466240", - "862139462138171392", - "861284090271715333", - "862139446149427201", - "861629109331525633", - "861721698609098753", - "862139453124612096", - "861283339482914816", - "861282466291748867", - "862055346759749632", - "862003019860389891", - "862140698346344449", - "862084376280092672", - "862003058708017152", - "862000677345787904", - "862029129310502913", - "862005822376882178", - "861969836297134085", - "861302955361927168", - "862064949451005953", - "861282589541355520", - "862005476858486784", - "862004684411850757", - "862139471101349890", - "862139467146170368", - "862139475098558465", - "862140706550403072", - "861282777001537536", - "862003184147079169", - "861283410656059394", - "861283417857691649", - "861888778922856448", - "861655860812099585", - "861834248063504384", - "862005210935382017", - "861282716930760704", - "861287082433622022" - ], - "id_column": "id" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia-temp/tweetsentbr", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "862006098672459776", + "861612241703063552", + "861833257087848448", + "861283345476571138", + "861283000335695873", + "862139461274152962", + "862139468702265344", + "862006107702734848", + "862004354458537984", + "861833322925883392", + "861603063190171648", + "862139462716989440", + "862005877355810818", + "861751885862244353", + "862045180261695489", + "862004252499226630", + "862023970828292097", + "862041752127107074", + "862034961863503872", + "861293756548608001", + "861993527575695360", + "862003099355021315", + "862002404086206467", + "861282989602463744", + "862139454399668229", + "862139463769743361", + "862054906689138688", + "862139446535360513", + "861997363744911361", + "862057988898648065", + "861329080083521536", + "861286289034838016", + "861833050526806017", + "861300658565255169", + "861989003821813760", + "861682750398631938", + "861283275716907008", + "861283402523267072", + "861873108147466240", + "862139462138171392", + "861284090271715333", + "862139446149427201", + "861629109331525633", + "861721698609098753", + "862139453124612096", + "861283339482914816", + "861282466291748867", + "862055346759749632", + "862003019860389891", + "862140698346344449", + "862084376280092672", + "862003058708017152", + "862000677345787904", + "862029129310502913", + "862005822376882178", + "861969836297134085", + "861302955361927168", + "862064949451005953", + "861282589541355520", + "862005476858486784", + "862004684411850757", + "862139471101349890", + "862139467146170368", + "862139475098558465", + "862140706550403072", + "861282777001537536", + "862003184147079169", + "861283410656059394", + "861283417857691649", + "861888778922856448", + "861655860812099585", + "861834248063504384", + "862005210935382017", + "861282716930760704", + "861287082433622022" + ], + "id_column": "id" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 5331, - "non_truncated": 8819, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 9213, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 2, - "accelerate_num_process": null, - "model_sha": 
"2f5c3c26594741b339d05b5bdf539d3a3583a6ba", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 134159241216, - "model_num_parameters": 66408521728, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 1, - "max_length": 2048, - "max_ctx_length": 2016, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1816.750816993464, - "min_seq_length": 1788, - "max_seq_length": 1911, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 14, - "non_truncated": 2434, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 14, - "mean_seq_length": 1960.750816993464, - "min_seq_length": 1932, - "max_seq_length": 2055, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 14.994281045751634 - }, - "bluex": { - "sample_size": 719, - "truncated": 538, - "non_truncated": 181, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 926, - "mean_seq_length": 2223.4937413073712, - "min_seq_length": 1720, - "max_seq_length": 3265, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 1.7121001390820585 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 906, - "non_truncated": 523, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1015, - "mean_seq_length": 2123.4933519944016, - "min_seq_length": 1758, - "max_seq_length": 3702, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.289713086074178 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 650, - "non_truncated": 0, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 763, - "mean_seq_length": 2124.6569230769232, - "min_seq_length": 2050, - "max_seq_length": 2290, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 13.826153846153845 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1624.8157142857142, - "min_seq_length": 1596, - "max_seq_length": 1960, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "model_meta": { + "truncated": 5331, + "non_truncated": 8819, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 9213, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 2, + "accelerate_num_process": null, + "model_sha": "2f5c3c26594741b339d05b5bdf539d3a3583a6ba", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 134159241216, + "model_num_parameters": 66408521728, + "model_is_loaded_in_4bit": null, + 
"model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 1, + "max_length": 2048, + "max_ctx_length": 2016, + "max_gen_toks": 32 }, - "oab_exams": { - "sample_size": 2195, - "truncated": 362, - "non_truncated": 1833, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 363, - "mean_seq_length": 1856.5257403189066, - "min_seq_length": 1486, - "max_seq_length": 2576, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.834624145785877 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1816.750816993464, + "min_seq_length": 1788, + "max_seq_length": 1911, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 14, + "non_truncated": 2434, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 14, + "mean_seq_length": 1960.750816993464, + "min_seq_length": 1932, + "max_seq_length": 2055, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 14.994281045751634 + }, + "bluex": { + "sample_size": 719, + "truncated": 538, + "non_truncated": 181, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 926, + "mean_seq_length": 2223.4937413073712, + "min_seq_length": 1720, + "max_seq_length": 3265, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 1.7121001390820585 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 906, + "non_truncated": 523, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1015, + "mean_seq_length": 2123.4933519944016, + "min_seq_length": 1758, + "max_seq_length": 3702, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.289713086074178 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 650, + "non_truncated": 0, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 763, + "mean_seq_length": 2124.6569230769232, + "min_seq_length": 2050, + "max_seq_length": 2290, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 13.826153846153845 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1624.8157142857142, + "min_seq_length": 1596, + "max_seq_length": 1960, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 362, + "non_truncated": 1833, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 363, + "mean_seq_length": 1856.5257403189066, + "min_seq_length": 1486, + "max_seq_length": 2576, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.834624145785877 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 851, + "non_truncated": 0, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 3182, + "mean_seq_length": 2300.578143360752, + "min_seq_length": 2254, + "max_seq_length": 2343, + "max_ctx_length": 2016, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 21.26086956521739 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 2010, + "non_truncated": 0, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 2950, + "mean_seq_length": 2088.7278606965174, + "min_seq_length": 2061, + "max_seq_length": 2157, + "max_ctx_length": 2016, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 23.53233830845771 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 851, - "non_truncated": 0, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 3182, - "mean_seq_length": 2300.578143360752, - "min_seq_length": 2254, - "max_seq_length": 2343, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 21.26086956521739 + "config": { + "model": "huggingface", + "model_args": "pretrained=xverse/XVERSE-65B,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=4096", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 2010, - "non_truncated": 0, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 2950, - "mean_seq_length": 2088.7278606965174, - "min_seq_length": 2061, - "max_seq_length": 2157, - "max_ctx_length": 2016, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 23.53233830845771 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=xverse/XVERSE-65B,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=4096", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "804df15" + "git_hash": "804df15" } \ No newline at end of file diff --git a/xverse/XVERSE-65B/results_2024-02-19T06-41-25.853474.json b/xverse/XVERSE-65B/results_2024-02-19T06-41-25.853474.json index 6ba4c765cb9bdb48f1d2f311b5e0691215d86342..c6fd12dfdf6c0fa888528022707977e241ebcb73 100644 --- a/xverse/XVERSE-65B/results_2024-02-19T06-41-25.853474.json +++ b/xverse/XVERSE-65B/results_2024-02-19T06-41-25.853474.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.537099653924716, - "all_grouped_npm": 0.30395711342849147, + "all_grouped_average": 0.5545058501740026, + "all_grouped_npm": 0.3298591911804061, "all_grouped": { "enem_challenge": 0.3449965010496851, "bluex": 0.37830319888734354, @@ -45,7 +45,7 @@ "faquad_nli": 0.4891375750750751, "hatebr_offensive": 0.7725655170589092, "portuguese_hate_speech": 0.48470765594061804, - "tweetsentbr": 0.46996729873073895 + "tweetsentbr": 0.6266230649743186 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.3449965010496851, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.4891375750750751, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7725655170589092, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.48470765594061804, - "harness|tweetsentbr|tweetsentbr|None|25": 0.46996729873073895 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6266230649743186 }, "harness|enem_challenge|enem_challenge|None|3": { 
"acc,all": 0.3449965010496851, @@ -150,9 +150,9 @@ "main_score": 0.48470765594061804 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.46996729873073895, + "f1_macro,all": 0.6266230649743186, "acc,all": 0.7164179104477612, - "main_score": 0.46996729873073895 + "main_score": 0.6266230649743186 } }, "config_tasks": { diff --git a/yunconglong/DARE_TIES_13B/raw_2024-05-18T23-38-30.233980/results.json b/yunconglong/DARE_TIES_13B/raw_2024-05-18T23-38-30.233980/results.json index b6dc716ac7356aedef2fa03afa168a9ff89635ce..42ca1962f469a12144a4abd86d08b3e2d3de128e 100644 --- a/yunconglong/DARE_TIES_13B/raw_2024-05-18T23-38-30.233980/results.json +++ b/yunconglong/DARE_TIES_13B/raw_2024-05-18T23-38-30.233980/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9252440876057113, - "acc,all": 0.9252450980392157, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.773934922061916, - "mse,all": 0.43568218954248367, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5229485396383866, - "acc,exam_id__UNICAMP_2018": 0.42592592592592593, - "acc,exam_id__USP_2023": 0.6136363636363636, - "acc,exam_id__USP_2022": 0.4897959183673469, - "acc,exam_id__USP_2019": 0.475, - "acc,exam_id__UNICAMP_2019": 0.56, - "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, - "acc,exam_id__UNICAMP_2023": 0.6046511627906976, - "acc,exam_id__USP_2018": 0.4444444444444444, - "acc,exam_id__UNICAMP_2022": 0.5384615384615384, - "acc,exam_id__USP_2020": 0.4642857142857143, - "acc,exam_id__USP_2024": 0.6829268292682927, - "acc,exam_id__UNICAMP_2024": 0.4444444444444444, - "acc,exam_id__UNICAMP_2020": 0.5454545454545454, - "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373, - "acc,exam_id__USP_2021": 0.5192307692307693, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6375087473757872, - "acc,exam_id__2009": 0.6173913043478261, - "acc,exam_id__2016_2": 0.5772357723577236, - "acc,exam_id__2013": 0.7129629629629629, - "acc,exam_id__2016": 0.6033057851239669, - "acc,exam_id__2015": 0.6050420168067226, - "acc,exam_id__2011": 0.6837606837606838, - "acc,exam_id__2014": 0.6146788990825688, - "acc,exam_id__2017": 0.6637931034482759, - "acc,exam_id__2010": 0.6495726495726496, - "acc,exam_id__2022": 0.6240601503759399, - "acc,exam_id__2023": 0.6666666666666666, - "acc,exam_id__2012": 0.6379310344827587 - }, - "faquad_nli": { - "f1_macro,all": 0.778378038405646, - "acc,all": 0.8415384615384616, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7573095601893222, - "acc,all": 0.7692857142857142 - }, - "oab_exams": { - "acc,all": 0.42277904328018223, - "acc,exam_id__2012-08": 0.375, - "acc,exam_id__2015-18": 0.45, - "acc,exam_id__2013-10": 0.4, - "acc,exam_id__2013-12": 0.45, - "acc,exam_id__2011-03": 0.3838383838383838, - "acc,exam_id__2012-09": 0.3246753246753247, - "acc,exam_id__2012-07": 0.3625, - "acc,exam_id__2016-21": 0.375, - "acc,exam_id__2012-06": 0.4875, - "acc,exam_id__2013-11": 0.4375, - "acc,exam_id__2016-19": 0.5256410256410257, - "acc,exam_id__2012-06a": 0.35, - "acc,exam_id__2014-14": 0.5, - "acc,exam_id__2017-22": 0.5375, - "acc,exam_id__2018-25": 0.475, - "acc,exam_id__2014-13": 0.3, - "acc,exam_id__2017-23": 0.425, - "acc,exam_id__2017-24": 0.35, - "acc,exam_id__2011-04": 0.425, - "acc,exam_id__2010-02": 0.48, - "acc,exam_id__2015-17": 0.5512820512820513, - "acc,exam_id__2011-05": 0.425, - "acc,exam_id__2014-15": 0.46153846153846156, - "acc,exam_id__2016-20": 0.425, - 
"acc,exam_id__2010-01": 0.36470588235294116, - "acc,exam_id__2016-20a": 0.4, - "acc,exam_id__2015-16": 0.375, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6998127730393177, - "acc,all": 0.7708578143360753 - }, - "tweetsentbr": { - "f1_macro,all": 0.5009745754861856, - "acc,all": 0.7054726368159204, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9252440876057113, + "acc,all": 0.9252450980392157, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.773934922061916, + "mse,all": 0.43568218954248367, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5229485396383866, + "acc,exam_id__UNICAMP_2018": 0.42592592592592593, + "acc,exam_id__USP_2023": 0.6136363636363636, + "acc,exam_id__USP_2022": 0.4897959183673469, + "acc,exam_id__USP_2019": 0.475, + "acc,exam_id__UNICAMP_2019": 0.56, + "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, + "acc,exam_id__UNICAMP_2023": 0.6046511627906976, + "acc,exam_id__USP_2018": 0.4444444444444444, + "acc,exam_id__UNICAMP_2022": 0.5384615384615384, + "acc,exam_id__USP_2020": 0.4642857142857143, + "acc,exam_id__USP_2024": 0.6829268292682927, + "acc,exam_id__UNICAMP_2024": 0.4444444444444444, + "acc,exam_id__UNICAMP_2020": 0.5454545454545454, + "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373, + "acc,exam_id__USP_2021": 0.5192307692307693, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6375087473757872, + "acc,exam_id__2009": 0.6173913043478261, + "acc,exam_id__2016_2": 0.5772357723577236, + "acc,exam_id__2013": 0.7129629629629629, + "acc,exam_id__2016": 0.6033057851239669, + "acc,exam_id__2015": 0.6050420168067226, + "acc,exam_id__2011": 0.6837606837606838, + "acc,exam_id__2014": 0.6146788990825688, + "acc,exam_id__2017": 0.6637931034482759, + "acc,exam_id__2010": 0.6495726495726496, + "acc,exam_id__2022": 0.6240601503759399, + "acc,exam_id__2023": 0.6666666666666666, + "acc,exam_id__2012": 0.6379310344827587 + }, + "faquad_nli": { + "f1_macro,all": 0.778378038405646, + "acc,all": 0.8415384615384616, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 
0.7573095601893222, + "acc,all": 0.7692857142857142 + }, + "oab_exams": { + "acc,all": 0.42277904328018223, + "acc,exam_id__2012-08": 0.375, + "acc,exam_id__2015-18": 0.45, + "acc,exam_id__2013-10": 0.4, + "acc,exam_id__2013-12": 0.45, + "acc,exam_id__2011-03": 0.3838383838383838, + "acc,exam_id__2012-09": 0.3246753246753247, + "acc,exam_id__2012-07": 0.3625, + "acc,exam_id__2016-21": 0.375, + "acc,exam_id__2012-06": 0.4875, + "acc,exam_id__2013-11": 0.4375, + "acc,exam_id__2016-19": 0.5256410256410257, + "acc,exam_id__2012-06a": 0.35, + "acc,exam_id__2014-14": 0.5, + "acc,exam_id__2017-22": 0.5375, + "acc,exam_id__2018-25": 0.475, + "acc,exam_id__2014-13": 0.3, + "acc,exam_id__2017-23": 0.425, + "acc,exam_id__2017-24": 0.35, + "acc,exam_id__2011-04": 0.425, + "acc,exam_id__2010-02": 0.48, + "acc,exam_id__2015-17": 0.5512820512820513, + "acc,exam_id__2011-05": 0.425, + "acc,exam_id__2014-15": 0.46153846153846156, + "acc,exam_id__2016-20": 0.425, + "acc,exam_id__2010-01": 0.36470588235294116, + "acc,exam_id__2016-20a": 0.4, + "acc,exam_id__2015-16": 0.375, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6998127730393177, + "acc,all": 0.7708578143360753 + }, + "tweetsentbr": { + "f1_macro,all": 0.6679661006482475, + "acc,all": 0.7054726368159204, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? 
Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + 
"UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + 
"id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? 
Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "74c6e4fbd272c9d897e8c93ee7de8a234f61900f", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 26295156736, - "model_num_parameters": 12879138816, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 
1620.039188243527, - "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "74c6e4fbd272c9d897e8c93ee7de8a234f61900f", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 26295156736, + "model_num_parameters": 12879138816, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=yunconglong/DARE_TIES_13B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=yunconglong/DARE_TIES_13B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - 
"git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/yunconglong/DARE_TIES_13B/results_2024-05-18T23-38-30.233980.json b/yunconglong/DARE_TIES_13B/results_2024-05-18T23-38-30.233980.json index b4650c4b398e0f172d9bf572d2d02db70bf64d2c..71bfae564f23667ef6ed82731fab3eeb14a7a169 100644 --- a/yunconglong/DARE_TIES_13B/results_2024-05-18T23-38-30.233980.json +++ b/yunconglong/DARE_TIES_13B/results_2024-05-18T23-38-30.233980.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.668765587453606, - "all_grouped_npm": 0.5082871298611003, + "all_grouped_average": 0.6873202013605018, + "all_grouped_npm": 0.5358981624606476, "all_grouped": { "enem_challenge": 0.6375087473757872, "bluex": 0.5229485396383866, @@ -45,7 +45,7 @@ "faquad_nli": 0.778378038405646, "hatebr_offensive": 0.7573095601893222, "portuguese_hate_speech": 0.6998127730393177, - "tweetsentbr": 0.5009745754861856 + "tweetsentbr": 0.6679661006482475 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6375087473757872, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.778378038405646, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7573095601893222, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6998127730393177, - "harness|tweetsentbr|tweetsentbr|None|25": 0.5009745754861856 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6679661006482475 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6375087473757872, @@ -150,9 +150,9 @@ "main_score": 0.6998127730393177 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.5009745754861856, + "f1_macro,all": 0.6679661006482475, "acc,all": 0.7054726368159204, - "main_score": 0.5009745754861856 + "main_score": 0.6679661006482475 } }, "config_tasks": { diff --git a/yunconglong/MoE_13B_DPO/raw_2024-07-04T01-29-22.264056/results.json b/yunconglong/MoE_13B_DPO/raw_2024-07-04T01-29-22.264056/results.json index 375a6dd5e1a95de3f17420b1720f17b26f02ae9b..edf8df9aaf538f69e7e0d5399ce3fccc93844c39 100644 --- a/yunconglong/MoE_13B_DPO/raw_2024-07-04T01-29-22.264056/results.json +++ b/yunconglong/MoE_13B_DPO/raw_2024-07-04T01-29-22.264056/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9256518082422204, - "acc,all": 0.9256535947712419, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7785639339764251, - "mse,all": 0.42804330065359475, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5312934631432545, - "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, - "acc,exam_id__USP_2024": 0.7073170731707317, - "acc,exam_id__UNICAMP_2022": 0.5384615384615384, - "acc,exam_id__USP_2021": 0.5, - "acc,exam_id__UNICAMP_2023": 0.6046511627906976, - "acc,exam_id__UNICAMP_2024": 0.4666666666666667, - "acc,exam_id__UNICAMP_2019": 0.58, - "acc,exam_id__USP_2020": 0.48214285714285715, - "acc,exam_id__USP_2023": 0.6590909090909091, - "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, - "acc,exam_id__USP_2018": 0.4444444444444444, - "acc,exam_id__UNICAMP_2020": 0.5818181818181818, - "acc,exam_id__UNICAMP_2018": 0.42592592592592593, - "acc,exam_id__USP_2022": 0.4897959183673469, - "acc,exam_id__USP_2019": 0.475, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6445066480055983, - "acc,exam_id__2016": 0.6115702479338843, - "acc,exam_id__2012": 0.6637931034482759, - "acc,exam_id__2016_2": 0.5853658536585366, - "acc,exam_id__2013": 0.7129629629629629, - "acc,exam_id__2023": 0.674074074074074, - 
"acc,exam_id__2009": 0.6173913043478261, - "acc,exam_id__2011": 0.6923076923076923, - "acc,exam_id__2017": 0.6724137931034483, - "acc,exam_id__2022": 0.631578947368421, - "acc,exam_id__2010": 0.6495726495726496, - "acc,exam_id__2014": 0.6238532110091743, - "acc,exam_id__2015": 0.6050420168067226 - }, - "faquad_nli": { - "f1_macro,all": 0.7736071481198676, - "acc,all": 0.84, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.754808735771493, - "acc,all": 0.7671428571428571 - }, - "oab_exams": { - "acc,all": 0.42642369020501136, - "acc,exam_id__2012-06": 0.475, - "acc,exam_id__2013-12": 0.4625, - "acc,exam_id__2012-07": 0.375, - "acc,exam_id__2013-11": 0.45, - "acc,exam_id__2015-18": 0.475, - "acc,exam_id__2012-06a": 0.35, - "acc,exam_id__2018-25": 0.475, - "acc,exam_id__2011-03": 0.37373737373737376, - "acc,exam_id__2010-02": 0.48, - "acc,exam_id__2014-14": 0.5, - "acc,exam_id__2013-10": 0.425, - "acc,exam_id__2011-05": 0.425, - "acc,exam_id__2016-21": 0.375, - "acc,exam_id__2017-22": 0.5375, - "acc,exam_id__2012-08": 0.375, - "acc,exam_id__2012-09": 0.3246753246753247, - "acc,exam_id__2011-04": 0.425, - "acc,exam_id__2015-17": 0.5512820512820513, - "acc,exam_id__2010-01": 0.3764705882352941, - "acc,exam_id__2017-23": 0.425, - "acc,exam_id__2014-13": 0.3, - "acc,exam_id__2016-20": 0.4125, - "acc,exam_id__2014-15": 0.47435897435897434, - "acc,exam_id__2016-20a": 0.4125, - "acc,exam_id__2017-24": 0.375, - "acc,exam_id__2015-16": 0.375, - "acc,exam_id__2016-19": 0.5128205128205128, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6924101916738147, - "acc,all": 0.763807285546416 - }, - "tweetsentbr": { - "f1_macro,all": 0.4980056714808651, - "acc,all": 0.7024875621890547, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9256518082422204, + "acc,all": 0.9256535947712419, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7785639339764251, + "mse,all": 0.42804330065359475, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5312934631432545, + "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, + "acc,exam_id__USP_2024": 0.7073170731707317, + "acc,exam_id__UNICAMP_2022": 0.5384615384615384, + "acc,exam_id__USP_2021": 0.5, + "acc,exam_id__UNICAMP_2023": 0.6046511627906976, + "acc,exam_id__UNICAMP_2024": 0.4666666666666667, + "acc,exam_id__UNICAMP_2019": 0.58, + "acc,exam_id__USP_2020": 0.48214285714285715, + "acc,exam_id__USP_2023": 0.6590909090909091, + "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, + "acc,exam_id__USP_2018": 0.4444444444444444, + "acc,exam_id__UNICAMP_2020": 0.5818181818181818, + "acc,exam_id__UNICAMP_2018": 0.42592592592592593, + "acc,exam_id__USP_2022": 0.4897959183673469, + "acc,exam_id__USP_2019": 0.475, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6445066480055983, + "acc,exam_id__2016": 0.6115702479338843, + "acc,exam_id__2012": 0.6637931034482759, + "acc,exam_id__2016_2": 0.5853658536585366, + "acc,exam_id__2013": 0.7129629629629629, + "acc,exam_id__2023": 0.674074074074074, + "acc,exam_id__2009": 0.6173913043478261, + "acc,exam_id__2011": 0.6923076923076923, + "acc,exam_id__2017": 0.6724137931034483, + "acc,exam_id__2022": 0.631578947368421, + "acc,exam_id__2010": 0.6495726495726496, + "acc,exam_id__2014": 0.6238532110091743, + "acc,exam_id__2015": 0.6050420168067226 + }, + "faquad_nli": { + "f1_macro,all": 0.7736071481198676, + "acc,all": 0.84, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.754808735771493, + "acc,all": 0.7671428571428571 + }, + "oab_exams": { + "acc,all": 0.42642369020501136, + "acc,exam_id__2012-06": 0.475, + "acc,exam_id__2013-12": 0.4625, + "acc,exam_id__2012-07": 0.375, + "acc,exam_id__2013-11": 0.45, + "acc,exam_id__2015-18": 0.475, + "acc,exam_id__2012-06a": 0.35, + "acc,exam_id__2018-25": 0.475, + "acc,exam_id__2011-03": 0.37373737373737376, + "acc,exam_id__2010-02": 0.48, + "acc,exam_id__2014-14": 0.5, + "acc,exam_id__2013-10": 0.425, + "acc,exam_id__2011-05": 0.425, + "acc,exam_id__2016-21": 0.375, + "acc,exam_id__2017-22": 0.5375, + "acc,exam_id__2012-08": 0.375, + "acc,exam_id__2012-09": 0.3246753246753247, + "acc,exam_id__2011-04": 0.425, + "acc,exam_id__2015-17": 0.5512820512820513, + "acc,exam_id__2010-01": 0.3764705882352941, + "acc,exam_id__2017-23": 0.425, + "acc,exam_id__2014-13": 0.3, + "acc,exam_id__2016-20": 0.4125, + 
"acc,exam_id__2014-15": 0.47435897435897434, + "acc,exam_id__2016-20a": 0.4125, + "acc,exam_id__2017-24": 0.375, + "acc,exam_id__2015-16": 0.375, + "acc,exam_id__2016-19": 0.5128205128205128, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6924101916738147, + "acc,all": 0.763807285546416 + }, + "tweetsentbr": { + "f1_macro,all": 0.6640075619744868, + "acc,all": 0.7024875621890547, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "d8d6a47f877fee3e638a158c2bd637c0013ed4e4", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 26295156736, - "model_num_parameters": 12879138816, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 
1620.039188243527, - "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "d8d6a47f877fee3e638a158c2bd637c0013ed4e4", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 26295156736, + "model_num_parameters": 12879138816, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=yunconglong/MoE_13B_DPO,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=yunconglong/MoE_13B_DPO,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - 
"git_hash": "5a13f3e" + "git_hash": "5a13f3e" } \ No newline at end of file diff --git a/yunconglong/MoE_13B_DPO/results_2024-07-04T01-29-22.264056.json b/yunconglong/MoE_13B_DPO/results_2024-07-04T01-29-22.264056.json index ce77e3470c53577b92c42b8e1873b3ff36a257fc..d5cb845793686bab213fdbefc8750aec6366cd18 100644 --- a/yunconglong/MoE_13B_DPO/results_2024-07-04T01-29-22.264056.json +++ b/yunconglong/MoE_13B_DPO/results_2024-07-04T01-29-22.264056.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6694745878465055, - "all_grouped_npm": 0.5080005645559058, + "all_grouped_average": 0.6879192423457969, + "all_grouped_npm": 0.5354479670846132, "all_grouped": { "enem_challenge": 0.6445066480055983, "bluex": 0.5312934631432545, @@ -45,7 +45,7 @@ "faquad_nli": 0.7736071481198676, "hatebr_offensive": 0.754808735771493, "portuguese_hate_speech": 0.6924101916738147, - "tweetsentbr": 0.4980056714808651 + "tweetsentbr": 0.6640075619744868 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6445066480055983, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7736071481198676, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.754808735771493, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6924101916738147, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4980056714808651 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6640075619744868 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6445066480055983, @@ -150,9 +150,9 @@ "main_score": 0.6924101916738147 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4980056714808651, + "f1_macro,all": 0.6640075619744868, "acc,all": 0.7024875621890547, - "main_score": 0.4980056714808651 + "main_score": 0.6640075619744868 } }, "config_tasks": { diff --git a/yunconglong/Truthful_DPO_TomGrc_FusionNet_7Bx2_MoE_13B/raw_2024-05-18T10-42-26.679204/results.json b/yunconglong/Truthful_DPO_TomGrc_FusionNet_7Bx2_MoE_13B/raw_2024-05-18T10-42-26.679204/results.json index 6e00d01d69e72a0967d3283d191c0dc180b960ec..f68fb80b922f2d75a11acfebcfbd6b3dd0c09aa8 100644 --- a/yunconglong/Truthful_DPO_TomGrc_FusionNet_7Bx2_MoE_13B/raw_2024-05-18T10-42-26.679204/results.json +++ b/yunconglong/Truthful_DPO_TomGrc_FusionNet_7Bx2_MoE_13B/raw_2024-05-18T10-42-26.679204/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9256504186602871, - "acc,all": 0.9256535947712419, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7758187578008335, - "mse,all": 0.43297385620915035, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.5340751043115438, - "acc,exam_id__UNICAMP_2024": 0.4666666666666667, - "acc,exam_id__USP_2019": 0.45, - "acc,exam_id__UNICAMP_2019": 0.6, - "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, - "acc,exam_id__USP_2021": 0.5192307692307693, - "acc,exam_id__UNICAMP_2020": 0.5636363636363636, - "acc,exam_id__UNICAMP_2022": 0.5641025641025641, - "acc,exam_id__USP_2018": 0.4444444444444444, - "acc,exam_id__USP_2022": 0.5102040816326531, - "acc,exam_id__UNICAMP_2018": 0.42592592592592593, - "acc,exam_id__UNICAMP_2023": 0.6046511627906976, - "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, - "acc,exam_id__USP_2020": 0.48214285714285715, - "acc,exam_id__USP_2024": 0.7073170731707317, - "acc,exam_id__USP_2023": 0.6590909090909091, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6438068579426172, - "acc,exam_id__2012": 0.646551724137931, - "acc,exam_id__2017": 0.6724137931034483, - 
"acc,exam_id__2013": 0.7129629629629629, - "acc,exam_id__2016": 0.6115702479338843, - "acc,exam_id__2011": 0.7094017094017094, - "acc,exam_id__2015": 0.6050420168067226, - "acc,exam_id__2022": 0.6240601503759399, - "acc,exam_id__2014": 0.6330275229357798, - "acc,exam_id__2010": 0.6410256410256411, - "acc,exam_id__2009": 0.6173913043478261, - "acc,exam_id__2016_2": 0.5853658536585366, - "acc,exam_id__2023": 0.674074074074074 - }, - "faquad_nli": { - "f1_macro,all": 0.7697183994826833, - "acc,all": 0.8415384615384616, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.7573095601893222, - "acc,all": 0.7692857142857142 - }, - "oab_exams": { - "acc,all": 0.42369020501138954, - "acc,exam_id__2014-15": 0.47435897435897434, - "acc,exam_id__2012-07": 0.3625, - "acc,exam_id__2016-20a": 0.4125, - "acc,exam_id__2015-16": 0.3625, - "acc,exam_id__2016-21": 0.375, - "acc,exam_id__2013-10": 0.4125, - "acc,exam_id__2014-13": 0.2875, - "acc,exam_id__2010-02": 0.47, - "acc,exam_id__2012-06": 0.4625, - "acc,exam_id__2018-25": 0.4625, - "acc,exam_id__2011-04": 0.4125, - "acc,exam_id__2012-08": 0.35, - "acc,exam_id__2015-18": 0.45, - "acc,exam_id__2011-05": 0.4625, - "acc,exam_id__2012-09": 0.3246753246753247, - "acc,exam_id__2017-24": 0.3625, - "acc,exam_id__2012-06a": 0.3625, - "acc,exam_id__2016-20": 0.425, - "acc,exam_id__2013-12": 0.45, - "acc,exam_id__2016-19": 0.5, - "acc,exam_id__2014-14": 0.5125, - "acc,exam_id__2017-22": 0.525, - "acc,exam_id__2010-01": 0.38823529411764707, - "acc,exam_id__2011-03": 0.3838383838383838, - "acc,exam_id__2015-17": 0.5512820512820513, - "acc,exam_id__2017-23": 0.45, - "acc,exam_id__2013-11": 0.45, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6996182106324738, - "acc,all": 0.7696827262044653 - }, - "tweetsentbr": { - "f1_macro,all": 0.4994192521845676, - "acc,all": 0.7039800995024875, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9256504186602871, + "acc,all": 0.9256535947712419, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7758187578008335, + "mse,all": 0.43297385620915035, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.5340751043115438, + "acc,exam_id__UNICAMP_2024": 0.4666666666666667, + "acc,exam_id__USP_2019": 0.45, + "acc,exam_id__UNICAMP_2019": 0.6, + "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824, + "acc,exam_id__USP_2021": 0.5192307692307693, + "acc,exam_id__UNICAMP_2020": 0.5636363636363636, + "acc,exam_id__UNICAMP_2022": 0.5641025641025641, + "acc,exam_id__USP_2018": 0.4444444444444444, + "acc,exam_id__USP_2022": 0.5102040816326531, + "acc,exam_id__UNICAMP_2018": 0.42592592592592593, + "acc,exam_id__UNICAMP_2023": 0.6046511627906976, + "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652, + "acc,exam_id__USP_2020": 0.48214285714285715, + "acc,exam_id__USP_2024": 0.7073170731707317, + "acc,exam_id__USP_2023": 0.6590909090909091, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6438068579426172, + "acc,exam_id__2012": 0.646551724137931, + "acc,exam_id__2017": 0.6724137931034483, + "acc,exam_id__2013": 0.7129629629629629, + "acc,exam_id__2016": 0.6115702479338843, + "acc,exam_id__2011": 0.7094017094017094, + "acc,exam_id__2015": 0.6050420168067226, + "acc,exam_id__2022": 0.6240601503759399, + "acc,exam_id__2014": 0.6330275229357798, + "acc,exam_id__2010": 0.6410256410256411, + "acc,exam_id__2009": 0.6173913043478261, + "acc,exam_id__2016_2": 0.5853658536585366, + "acc,exam_id__2023": 0.674074074074074 + }, + "faquad_nli": { + "f1_macro,all": 0.7697183994826833, + "acc,all": 0.8415384615384616, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.7573095601893222, + "acc,all": 0.7692857142857142 + }, + "oab_exams": { + "acc,all": 0.42369020501138954, + "acc,exam_id__2014-15": 0.47435897435897434, + "acc,exam_id__2012-07": 0.3625, + "acc,exam_id__2016-20a": 0.4125, + "acc,exam_id__2015-16": 0.3625, + "acc,exam_id__2016-21": 0.375, + "acc,exam_id__2013-10": 0.4125, + "acc,exam_id__2014-13": 0.2875, + "acc,exam_id__2010-02": 0.47, + "acc,exam_id__2012-06": 0.4625, + "acc,exam_id__2018-25": 0.4625, + "acc,exam_id__2011-04": 0.4125, + "acc,exam_id__2012-08": 0.35, + "acc,exam_id__2015-18": 0.45, + "acc,exam_id__2011-05": 0.4625, + "acc,exam_id__2012-09": 0.3246753246753247, + "acc,exam_id__2017-24": 0.3625, + "acc,exam_id__2012-06a": 0.3625, + "acc,exam_id__2016-20": 0.425, + "acc,exam_id__2013-12": 0.45, + "acc,exam_id__2016-19": 0.5, + "acc,exam_id__2014-14": 0.5125, + "acc,exam_id__2017-22": 0.525, + 
"acc,exam_id__2010-01": 0.38823529411764707, + "acc,exam_id__2011-03": 0.3838383838383838, + "acc,exam_id__2015-17": 0.5512820512820513, + "acc,exam_id__2017-23": 0.45, + "acc,exam_id__2013-11": 0.45, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6996182106324738, + "acc,all": 0.7696827262044653 + }, + "tweetsentbr": { + "f1_macro,all": 0.6658923362460901, + "acc,all": 0.7039800995024875, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "dd66c98fa56ab95e321e591f123081ab4296a032", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 26295156736, - "model_num_parameters": 12879138816, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 
1620.039188243527, - "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "dd66c98fa56ab95e321e591f123081ab4296a032", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 26295156736, + "model_num_parameters": 12879138816, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=yunconglong/Truthful_DPO_TomGrc_FusionNet_7Bx2_MoE_13B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=yunconglong/Truthful_DPO_TomGrc_FusionNet_7Bx2_MoE_13B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - 
], - "bootstrap_iters": 0, - "gen_kwargs": null - }, - "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/yunconglong/Truthful_DPO_TomGrc_FusionNet_7Bx2_MoE_13B/results_2024-05-18T10-42-26.679204.json b/yunconglong/Truthful_DPO_TomGrc_FusionNet_7Bx2_MoE_13B/results_2024-05-18T10-42-26.679204.json index ac4ad4eee84565ba4f1bd52b02ec8a2af9f130bd..8a25315c66926dd1e044f439eaffe2b155fc7eca 100644 --- a/yunconglong/Truthful_DPO_TomGrc_FusionNet_7Bx2_MoE_13B/results_2024-05-18T10-42-26.679204.json +++ b/yunconglong/Truthful_DPO_TomGrc_FusionNet_7Bx2_MoE_13B/results_2024-05-18T10-42-26.679204.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6699007518017466, - "all_grouped_npm": 0.509124297926221, + "all_grouped_average": 0.6883977611419156, + "all_grouped_npm": 0.5366496094443299, "all_grouped": { "enem_challenge": 0.6438068579426172, "bluex": 0.5340751043115438, @@ -45,7 +45,7 @@ "faquad_nli": 0.7697183994826833, "hatebr_offensive": 0.7573095601893222, "portuguese_hate_speech": 0.6996182106324738, - "tweetsentbr": 0.4994192521845676 + "tweetsentbr": 0.6658923362460901 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6438068579426172, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7697183994826833, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7573095601893222, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6996182106324738, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4994192521845676 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6658923362460901 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6438068579426172, @@ -150,9 +150,9 @@ "main_score": 0.6996182106324738 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4994192521845676, + "f1_macro,all": 0.6658923362460901, "acc,all": 0.7039800995024875, - "main_score": 0.4994192521845676 + "main_score": 0.6658923362460901 } }, "config_tasks": { diff --git a/zhengr/MixTAO-7Bx2-MoE-v8.1/raw_2024-05-17T19-51-43.191836/results.json b/zhengr/MixTAO-7Bx2-MoE-v8.1/raw_2024-05-17T19-51-43.191836/results.json index 316acaaa63517dc49a1d8e5dcba10b5068f9db5f..d25aec284ebbbbfbf4a8f0e26d5a21b57102787a 100644 --- a/zhengr/MixTAO-7Bx2-MoE-v8.1/raw_2024-05-17T19-51-43.191836/results.json +++ b/zhengr/MixTAO-7Bx2-MoE-v8.1/raw_2024-05-17T19-51-43.191836/results.json @@ -1,1244 +1,1244 @@ { - "results": { - "assin2_rte": { - "f1_macro,all": 0.9211590650705042, - "acc,all": 0.9211601307189542, - "alias": "assin2_rte" - }, - "assin2_sts": { - "pearson,all": 0.7751642830429548, - "mse,all": 0.43529820261437907, - "alias": "assin2_sts" - }, - "bluex": { - "acc,all": 0.541029207232267, - "acc,exam_id__UNICAMP_2024": 0.4888888888888889, - "acc,exam_id__USP_2019": 0.4, - "acc,exam_id__UNICAMP_2019": 0.6, - "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373, - "acc,exam_id__USP_2021": 0.5, - "acc,exam_id__UNICAMP_2020": 0.6, - "acc,exam_id__UNICAMP_2022": 0.5641025641025641, - "acc,exam_id__USP_2018": 0.46296296296296297, - "acc,exam_id__USP_2022": 0.4897959183673469, - "acc,exam_id__UNICAMP_2018": 0.48148148148148145, - "acc,exam_id__UNICAMP_2023": 0.627906976744186, - "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, - "acc,exam_id__USP_2020": 0.4642857142857143, - "acc,exam_id__USP_2024": 0.7073170731707317, - "acc,exam_id__USP_2023": 0.6590909090909091, - "alias": "bluex" - }, - "enem_challenge": { - "alias": "enem", - "acc,all": 0.6403079076277117, - "acc,exam_id__2012": 0.6379310344827587, - 
"acc,exam_id__2017": 0.6637931034482759, - "acc,exam_id__2013": 0.7129629629629629, - "acc,exam_id__2016": 0.5785123966942148, - "acc,exam_id__2011": 0.7094017094017094, - "acc,exam_id__2015": 0.5966386554621849, - "acc,exam_id__2022": 0.6090225563909775, - "acc,exam_id__2014": 0.6238532110091743, - "acc,exam_id__2010": 0.6752136752136753, - "acc,exam_id__2009": 0.6347826086956522, - "acc,exam_id__2016_2": 0.5772357723577236, - "acc,exam_id__2023": 0.674074074074074 - }, - "faquad_nli": { - "f1_macro,all": 0.7769607843137254, - "acc,all": 0.8276923076923077, - "alias": "faquad_nli" - }, - "hatebr_offensive": { - "alias": "hatebr_offensive_binary", - "f1_macro,all": 0.8200809063593439, - "acc,all": 0.8235714285714286 - }, - "oab_exams": { - "acc,all": 0.42915717539863324, - "acc,exam_id__2014-15": 0.44871794871794873, - "acc,exam_id__2012-07": 0.425, - "acc,exam_id__2016-20a": 0.375, - "acc,exam_id__2015-16": 0.375, - "acc,exam_id__2016-21": 0.4, - "acc,exam_id__2013-10": 0.4125, - "acc,exam_id__2014-13": 0.2875, - "acc,exam_id__2010-02": 0.48, - "acc,exam_id__2012-06": 0.4375, - "acc,exam_id__2018-25": 0.4875, - "acc,exam_id__2011-04": 0.4, - "acc,exam_id__2012-08": 0.3875, - "acc,exam_id__2015-18": 0.4375, - "acc,exam_id__2011-05": 0.5, - "acc,exam_id__2012-09": 0.35064935064935066, - "acc,exam_id__2017-24": 0.375, - "acc,exam_id__2012-06a": 0.3625, - "acc,exam_id__2016-20": 0.4125, - "acc,exam_id__2013-12": 0.4375, - "acc,exam_id__2016-19": 0.5128205128205128, - "acc,exam_id__2014-14": 0.5, - "acc,exam_id__2017-22": 0.525, - "acc,exam_id__2010-01": 0.4, - "acc,exam_id__2011-03": 0.36363636363636365, - "acc,exam_id__2015-17": 0.5769230769230769, - "acc,exam_id__2017-23": 0.45, - "acc,exam_id__2013-11": 0.475, - "alias": "oab_exams" - }, - "portuguese_hate_speech": { - "alias": "portuguese_hate_speech_binary", - "f1_macro,all": 0.6746822513364406, - "acc,all": 0.7062279670975323 - }, - "tweetsentbr": { - "f1_macro,all": 0.4909817936544455, - "acc,all": 0.7019900497512438, - "alias": "tweetsentbr" - } - }, - "configs": { - "assin2_rte": { - "task": "assin2_rte", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", - "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", - "description": "Abaixo estão pares de premissa e hipótese. 
Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true + "results": { + "assin2_rte": { + "f1_macro,all": 0.9211590650705042, + "acc,all": 0.9211601307189542, + "alias": "assin2_rte" }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true + "assin2_sts": { + "pearson,all": 0.7751642830429548, + "mse,all": 0.43529820261437907, + "alias": "assin2_sts" + }, + "bluex": { + "acc,all": 0.541029207232267, + "acc,exam_id__UNICAMP_2024": 0.4888888888888889, + "acc,exam_id__USP_2019": 0.4, + "acc,exam_id__UNICAMP_2019": 0.6, + "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373, + "acc,exam_id__USP_2021": 0.5, + "acc,exam_id__UNICAMP_2020": 0.6, + "acc,exam_id__UNICAMP_2022": 0.5641025641025641, + "acc,exam_id__USP_2018": 0.46296296296296297, + "acc,exam_id__USP_2022": 0.4897959183673469, + "acc,exam_id__UNICAMP_2018": 0.48148148148148145, + "acc,exam_id__UNICAMP_2023": 0.627906976744186, + "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478, + "acc,exam_id__USP_2020": 0.4642857142857143, + "acc,exam_id__USP_2024": 0.7073170731707317, + "acc,exam_id__USP_2023": 0.6590909090909091, + "alias": "bluex" + }, + "enem_challenge": { + "alias": "enem", + "acc,all": 0.6403079076277117, + "acc,exam_id__2012": 0.6379310344827587, + "acc,exam_id__2017": 0.6637931034482759, + "acc,exam_id__2013": 0.7129629629629629, + "acc,exam_id__2016": 0.5785123966942148, + "acc,exam_id__2011": 0.7094017094017094, + "acc,exam_id__2015": 0.5966386554621849, + "acc,exam_id__2022": 0.6090225563909775, + "acc,exam_id__2014": 0.6238532110091743, + "acc,exam_id__2010": 0.6752136752136753, + "acc,exam_id__2009": 0.6347826086956522, + "acc,exam_id__2016_2": 0.5772357723577236, + "acc,exam_id__2023": 0.674074074074074 + }, + "faquad_nli": { + "f1_macro,all": 0.7769607843137254, + "acc,all": 0.8276923076923077, + "alias": "faquad_nli" + }, + "hatebr_offensive": { + "alias": "hatebr_offensive_binary", + "f1_macro,all": 0.8200809063593439, + "acc,all": 0.8235714285714286 + }, + "oab_exams": { + "acc,all": 0.42915717539863324, + "acc,exam_id__2014-15": 0.44871794871794873, + "acc,exam_id__2012-07": 0.425, + "acc,exam_id__2016-20a": 0.375, + "acc,exam_id__2015-16": 0.375, + "acc,exam_id__2016-21": 0.4, + "acc,exam_id__2013-10": 0.4125, + "acc,exam_id__2014-13": 0.2875, + "acc,exam_id__2010-02": 0.48, + "acc,exam_id__2012-06": 0.4375, + "acc,exam_id__2018-25": 0.4875, + "acc,exam_id__2011-04": 0.4, + "acc,exam_id__2012-08": 0.3875, + "acc,exam_id__2015-18": 0.4375, + "acc,exam_id__2011-05": 0.5, + "acc,exam_id__2012-09": 0.35064935064935066, + "acc,exam_id__2017-24": 0.375, + "acc,exam_id__2012-06a": 0.3625, + "acc,exam_id__2016-20": 0.4125, + "acc,exam_id__2013-12": 0.4375, + "acc,exam_id__2016-19": 0.5128205128205128, + "acc,exam_id__2014-14": 0.5, + "acc,exam_id__2017-22": 0.525, + "acc,exam_id__2010-01": 0.4, 
+ "acc,exam_id__2011-03": 0.36363636363636365, + "acc,exam_id__2015-17": 0.5769230769230769, + "acc,exam_id__2017-23": 0.45, + "acc,exam_id__2013-11": 0.475, + "alias": "oab_exams" + }, + "portuguese_hate_speech": { + "alias": "portuguese_hate_speech_binary", + "f1_macro,all": 0.6746822513364406, + "acc,all": 0.7062279670975323 + }, + "tweetsentbr": { + "f1_macro,all": 0.6546423915392606, + "acc,all": 0.7019900497512438, + "alias": "tweetsentbr" } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + }, + "configs": { + "assin2_rte": { + "task": "assin2_rte", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:", + "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}", + "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } + }, + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "assin2_sts": { - "task": "assin2_sts", - "group": [ - "pt_benchmark", - "assin2" - ], - "dataset_path": "assin2", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", - "doc_to_target": "", - "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. 
Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 1, - 3251, - 2, - 3252, - 3, - 4, - 5, - 6, - 3253, - 7, - 3254, - 3255, - 3256, - 8, - 9, - 10, - 3257, - 11, - 3258, - 12, - 13, - 14, - 15, - 3259, - 3260, - 3261, - 3262, - 3263, - 16, - 17, - 3264, - 18, - 3265, - 3266, - 3267, - 19, - 20, - 3268, - 3269, - 21, - 3270, - 3271, - 22, - 3272, - 3273, - 23, - 3274, - 24, - 25, - 3275 - ], - "id_column": "sentence_pair_id" - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "pearson", - "aggregation": "pearsonr", - "higher_is_better": true }, - { - "metric": "mse", - "aggregation": "mean_squared_error", - "higher_is_better": false - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "number_filter", - "type": "float", - "range_min": 1.0, - "range_max": 5.0, - "on_outside_range": "clip", - "fallback": 5.0 + "assin2_sts": { + "task": "assin2_sts", + "group": [ + "pt_benchmark", + "assin2" + ], + "dataset_path": "assin2", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:", + "doc_to_target": "", + "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 1, + 3251, + 2, + 3252, + 3, + 4, + 5, + 6, + 3253, + 7, + 3254, + 3255, + 3256, + 8, + 9, + 10, + 3257, + 11, + 3258, + 12, + 13, + 14, + 15, + 3259, + 3260, + 3261, + 3262, + 3263, + 16, + 17, + 3264, + 18, + 3265, + 3266, + 3267, + 19, + 20, + 3268, + 3269, + 21, + 3270, + 3271, + 22, + 3272, + 3273, + 23, + 3274, + 24, + 25, + 3275 + ], + "id_column": "sentence_pair_id" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "bluex": { - "task": "bluex", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia-temp/BLUEX_without_images", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "USP_2018_3", - "UNICAMP_2018_2", - "USP_2018_35", - "UNICAMP_2018_16", - "USP_2018_89" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - 
"filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "pearson", + "aggregation": "pearsonr", + "higher_is_better": true + }, + { + "metric": "mse", + "aggregation": "mean_squared_error", + "higher_is_better": false + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "number_filter", + "type": "float", + "range_min": 1.0, + "range_max": 5.0, + "on_outside_range": "clip", + "fallback": 5.0 + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 + } + }, + "bluex": { + "task": "bluex", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia-temp/BLUEX_without_images", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "USP_2018_3", + "UNICAMP_2018_2", + "USP_2018_35", + "UNICAMP_2018_16", + "USP_2018_89" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "enem_challenge": { - "task": "enem_challenge", - "task_alias": "enem", - "group": [ - "pt_benchmark", - "vestibular" - ], - "dataset_path": "eduagarcia/enem_challenge", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2022_21", - "2022_88", - "2022_143" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" - }, - { - "function": "remove_accents" + }, + "enem_challenge": { + "task": "enem_challenge", + "task_alias": "enem", + "group": [ + "pt_benchmark", + "vestibular" + ], + "dataset_path": "eduagarcia/enem_challenge", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2022_21", + "2022_88", + "2022_143" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D", - "E" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", - "\\b([ABCDE])\\.", - "\\b([ABCDE]) ?[.):-]", - "\\b([ABCDE])$", - "\\b([ABCDE])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D", + "E" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCDE])\\b", + "\\b([ABCDE])\\.", + "\\b([ABCDE]) ?[.):-]", + "\\b([ABCDE])$", + "\\b([ABCDE])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.1 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.1 - } - }, - "faquad_nli": { - "task": "faquad_nli", - "group": [ - "pt_benchmark" - ], - "dataset_path": "ruanchaves/faquad-nli", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", - "doc_to_target": "{{['Não', 'Sim'][label]}}", - "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n", - "sampler_config": { - "fewshot_indices": [ - 1893, - 949, - 663, - 105, - 1169, - 2910, - 2227, - 2813, - 974, - 558, - 1503, - 1958, - 2918, - 601, - 1560, - 984, - 2388, - 995, - 2233, - 1982, - 165, - 2788, - 1312, - 2285, - 522, - 1113, - 1670, - 323, - 236, - 1263, - 1562, - 2519, - 1049, - 432, - 1167, - 1394, - 2022, - 2551, - 2194, - 2187, - 2282, - 2816, - 108, - 301, - 1185, - 1315, - 1420, - 2436, - 2322, - 766 - ] - } - }, - "num_fewshot": 15, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "faquad_nli": { + "task": "faquad_nli", + "group": [ + "pt_benchmark" + ], + "dataset_path": "ruanchaves/faquad-nli", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?", + "doc_to_target": "{{['Não', 'Sim'][label]}}", + "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. 
Escreva apenas \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n", + "sampler_config": { + "fewshot_indices": [ + 1893, + 949, + 663, + 105, + 1169, + 2910, + 2227, + 2813, + 974, + 558, + 1503, + 1958, + 2918, + 601, + 1560, + 984, + 2388, + 995, + 2233, + 1982, + 165, + 2788, + 1312, + 2285, + 522, + 1113, + 1670, + 323, + 236, + 1263, + 1562, + 2519, + 1049, + 432, + 1167, + 1394, + 2022, + 2551, + 2194, + 2187, + 2282, + 2816, + 108, + 301, + 1185, + 1315, + 1420, + 2436, + 2322, + 766 + ] + } }, - { - "function": "take_first" + "num_fewshot": 15, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.1 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.1 - } - }, - "hatebr_offensive": { - "task": "hatebr_offensive", - "task_alias": "hatebr_offensive_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "HateBR_offensive_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 48, - 44, - 36, - 20, - 3511, - 88, - 3555, - 16, - 56, - 3535, - 60, - 40, - 3527, - 4, - 76, - 3579, - 3523, - 3551, - 68, - 3503, - 84, - 3539, - 64, - 3599, - 80, - 3563, - 3559, - 3543, - 3547, - 3587, - 3595, - 3575, - 3567, - 3591, - 24, - 96, - 92, - 3507, - 52, - 72, - 8, - 3571, - 3515, - 3519, - 3531, - 28, - 32, - 0, - 12, - 3583 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "hatebr_offensive": { + "task": "hatebr_offensive", + "task_alias": "hatebr_offensive_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "HateBR_offensive_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 48, + 44, + 36, + 20, + 3511, + 88, + 3555, + 16, + 56, + 3535, + 60, + 40, + 3527, + 4, + 76, + 3579, + 3523, + 3551, + 68, + 3503, + 84, + 3539, + 64, + 3599, + 80, + 3563, + 3559, + 3543, + 3547, + 3587, + 3595, + 3575, + 3567, + 3591, + 24, + 96, + 92, + 3507, + 52, + 72, + 8, + 3571, + 3515, + 3519, + 3531, + 28, + 32, + 0, + 12, + 3583 + ], + "id_column": "idx" + } }, - { - "function": "take_first" - } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "oab_exams": { - "task": "oab_exams", - "group": [ - "legal_benchmark", - "pt_benchmark" - ], - "dataset_path": "eduagarcia/oab_exams", - "test_split": "train", - "fewshot_split": "train", - "doc_to_text": "", - "doc_to_target": "{{answerKey}}", - "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - "2010-01_1", - "2010-01_11", - "2010-01_13", - "2010-01_23", - "2010-01_26", - "2010-01_28", - "2010-01_38", - "2010-01_48", - "2010-01_58", - "2010-01_68", - "2010-01_76", - "2010-01_83", - "2010-01_85", - "2010-01_91", - "2010-01_99" - ], - "id_column": "id", - "exclude_from_task": true - } - }, - "num_fewshot": 3, - "metric_list": [ - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 
0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "normalize_spaces" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "remove_accents" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 + } + }, + "oab_exams": { + "task": "oab_exams", + "group": [ + "legal_benchmark", + "pt_benchmark" + ], + "dataset_path": "eduagarcia/oab_exams", + "test_split": "train", + "fewshot_split": "train", + "doc_to_text": "", + "doc_to_target": "{{answerKey}}", + "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + "2010-01_1", + "2010-01_11", + "2010-01_13", + "2010-01_23", + "2010-01_26", + "2010-01_28", + "2010-01_38", + "2010-01_48", + "2010-01_58", + "2010-01_68", + "2010-01_76", + "2010-01_83", + "2010-01_85", + "2010-01_91", + "2010-01_99" + ], + "id_column": "id", + "exclude_from_task": true + } }, - { - "function": "find_choices", - "choices": [ - "A", - "B", - "C", - "D" - ], - "regex_patterns": [ - "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", - "\\b([ABCD])\\.", - "\\b([ABCD]) ?[.):-]", - "\\b([ABCD])$", - "\\b([ABCD])\\b" - ] + "num_fewshot": 3, + "metric_list": [ + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "normalize_spaces" + }, + { + "function": "remove_accents" + }, + { + "function": "find_choices", + "choices": [ + "A", + "B", + "C", + "D" + ], + "regex_patterns": [ + "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? 
([ABCD])\\b", + "\\b([ABCD])\\.", + "\\b([ABCD]) ?[.):-]", + "\\b([ABCD])$", + "\\b([ABCD])\\b" + ] + }, + { + "function": "take_first" + } + ], + "group_by": { + "column": "exam_id" + } + } + ], + "should_decontaminate": true, + "doc_to_decontamination_query": "", + "metadata": { + "version": 1.5 } - ], - "group_by": { - "column": "exam_id" - } - } - ], - "should_decontaminate": true, - "doc_to_decontamination_query": "", - "metadata": { - "version": 1.5 - } - }, - "portuguese_hate_speech": { - "task": "portuguese_hate_speech", - "task_alias": "portuguese_hate_speech_binary", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/portuguese_benchmark", - "dataset_name": "Portuguese_Hate_Speech_binary", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", - "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "id_sampler", - "sampler_config": { - "id_list": [ - 52, - 50, - 39, - 28, - 3, - 105, - 22, - 25, - 60, - 11, - 66, - 41, - 9, - 4, - 91, - 42, - 7, - 20, - 76, - 1, - 104, - 13, - 67, - 54, - 97, - 27, - 24, - 14, - 16, - 48, - 53, - 40, - 34, - 49, - 32, - 119, - 114, - 2, - 58, - 83, - 18, - 36, - 5, - 6, - 10, - 35, - 38, - 0, - 21, - 46 - ], - "id_column": "idx" - } - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Sim", - "Não" - ] + "portuguese_hate_speech": { + "task": "portuguese_hate_speech", + "task_alias": "portuguese_hate_speech_binary", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/portuguese_benchmark", + "dataset_name": "Portuguese_Hate_Speech_binary", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:", + "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. 
Responda apenas com \"Sim\" ou \"Não\".\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "id_sampler", + "sampler_config": { + "id_list": [ + 52, + 50, + 39, + 28, + 3, + 105, + 22, + 25, + 60, + 11, + 66, + 41, + 9, + 4, + 91, + 42, + 7, + 20, + 76, + 1, + 104, + 13, + 67, + 54, + 97, + 27, + 24, + 14, + 16, + 48, + 53, + 40, + 34, + 49, + 32, + 119, + 114, + 2, + 58, + 83, + 18, + 36, + 5, + 6, + 10, + 35, + 38, + 0, + 21, + 46 + ], + "id_column": "idx" + } + }, + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] }, - { - "function": "take_first" + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Sim", + "Não" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] - } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - }, - "tweetsentbr": { - "task": "tweetsentbr", - "group": [ - "pt_benchmark" - ], - "dataset_path": "eduagarcia/tweetsentbr_fewshot", - "test_split": "test", - "fewshot_split": "train", - "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", - "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", - "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n", - "target_delimiter": " ", - "fewshot_delimiter": "\n\n", - "fewshot_config": { - "sampler": "first_n" - }, - "num_fewshot": 25, - "metric_list": [ - { - "metric": "f1_macro", - "aggregation": "f1_macro", - "higher_is_better": true }, - { - "metric": "acc", - "aggregation": "acc", - "higher_is_better": true - } - ], - "output_type": "generate_until", - "generation_kwargs": { - "max_gen_toks": 32, - "do_sample": false, - "temperature": 0.0, - "top_k": null, - "top_p": null, - "until": [ - "\n\n" - ] - }, - "repeats": 1, - "filter_list": [ - { - "name": "all", - "filter": [ - { - "function": "find_similar_label", - "labels": [ - "Positivo", - "Neutro", - "Negativo" - ] + "tweetsentbr": { + "task": "tweetsentbr", + "group": [ + "pt_benchmark" + ], + "dataset_path": "eduagarcia/tweetsentbr_fewshot", + "test_split": "test", + "fewshot_split": "train", + "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:", + "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}", + "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. 
Responda apenas com uma das opções.\n\n", + "target_delimiter": " ", + "fewshot_delimiter": "\n\n", + "fewshot_config": { + "sampler": "first_n" }, - { - "function": "take_first" + "num_fewshot": 25, + "metric_list": [ + { + "metric": "f1_macro", + "aggregation": "f1_macro", + "higher_is_better": true + }, + { + "metric": "acc", + "aggregation": "acc", + "higher_is_better": true + } + ], + "output_type": "generate_until", + "generation_kwargs": { + "max_gen_toks": 32, + "do_sample": false, + "temperature": 0.0, + "top_k": null, + "top_p": null, + "until": [ + "\n\n" + ] + }, + "repeats": 1, + "filter_list": [ + { + "name": "all", + "filter": [ + { + "function": "find_similar_label", + "labels": [ + "Positivo", + "Neutro", + "Negativo" + ] + }, + { + "function": "take_first" + } + ] + } + ], + "should_decontaminate": false, + "metadata": { + "version": 1.0 } - ] } - ], - "should_decontaminate": false, - "metadata": { - "version": 1.0 - } - } - }, - "versions": { - "assin2_rte": 1.1, - "assin2_sts": 1.1, - "bluex": 1.1, - "enem_challenge": 1.1, - "faquad_nli": 1.1, - "hatebr_offensive": 1.0, - "oab_exams": 1.5, - "portuguese_hate_speech": 1.0, - "tweetsentbr": 1.0 - }, - "n-shot": { - "assin2_rte": 15, - "assin2_sts": 15, - "bluex": 3, - "enem_challenge": 3, - "faquad_nli": 15, - "hatebr_offensive": 25, - "oab_exams": 3, - "portuguese_hate_speech": 25, - "tweetsentbr": 25 - }, - "model_meta": { - "truncated": 1, - "non_truncated": 14149, - "padded": 0, - "non_padded": 14150, - "fewshots_truncated": 1, - "has_chat_template": false, - "chat_type": null, - "n_gpus": 1, - "accelerate_num_process": null, - "model_sha": "d6c689e5cf3442d2a4215c24f7ac5c5aa885b2d9", - "model_dtype": "torch.bfloat16", - "model_memory_footprint": 26295156736, - "model_num_parameters": 12879138816, - "model_is_loaded_in_4bit": null, - "model_is_loaded_in_8bit": null, - "model_is_quantized": null, - "model_device": "cuda:0", - "batch_size": 32, - "max_length": 2560, - "max_ctx_length": 2528, - "max_gen_toks": 32 - }, - "task_model_meta": { - "assin2_rte": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1369.7455065359477, - "min_seq_length": 1346, - "max_seq_length": 1436, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 - }, - "assin2_sts": { - "sample_size": 2448, - "truncated": 0, - "non_truncated": 2448, - "padded": 0, - "non_padded": 2448, - "fewshots_truncated": 0, - "mean_seq_length": 1593.7455065359477, - "min_seq_length": 1570, - "max_seq_length": 1660, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 }, - "bluex": { - "sample_size": 719, - "truncated": 0, - "non_truncated": 719, - "padded": 0, - "non_padded": 719, - "fewshots_truncated": 0, - "mean_seq_length": 1719.9262865090404, - "min_seq_length": 1343, - "max_seq_length": 2520, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "versions": { + "assin2_rte": 1.1, + "assin2_sts": 1.1, + "bluex": 1.1, + "enem_challenge": 1.1, + "faquad_nli": 1.1, + "hatebr_offensive": 1.0, + "oab_exams": 1.5, + "portuguese_hate_speech": 1.0, + "tweetsentbr": 1.0 }, - "enem_challenge": { - "sample_size": 1429, - "truncated": 1, - "non_truncated": 1428, - "padded": 0, - "non_padded": 1429, - "fewshots_truncated": 1, - "mean_seq_length": 
1620.039188243527, - "min_seq_length": 1354, - "max_seq_length": 2618, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 2.9993002099370187 + "n-shot": { + "assin2_rte": 15, + "assin2_sts": 15, + "bluex": 3, + "enem_challenge": 3, + "faquad_nli": 15, + "hatebr_offensive": 25, + "oab_exams": 3, + "portuguese_hate_speech": 25, + "tweetsentbr": 25 }, - "faquad_nli": { - "sample_size": 650, - "truncated": 0, - "non_truncated": 650, - "padded": 0, - "non_padded": 650, - "fewshots_truncated": 0, - "mean_seq_length": 1594.9876923076922, - "min_seq_length": 1539, - "max_seq_length": 1715, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 15.0, - "mean_effective_fewshot_size": 15.0 + "model_meta": { + "truncated": 1, + "non_truncated": 14149, + "padded": 0, + "non_padded": 14150, + "fewshots_truncated": 1, + "has_chat_template": false, + "chat_type": null, + "n_gpus": 1, + "accelerate_num_process": null, + "model_sha": "d6c689e5cf3442d2a4215c24f7ac5c5aa885b2d9", + "model_dtype": "torch.bfloat16", + "model_memory_footprint": 26295156736, + "model_num_parameters": 12879138816, + "model_is_loaded_in_4bit": null, + "model_is_loaded_in_8bit": null, + "model_is_quantized": null, + "model_device": "cuda:0", + "batch_size": 32, + "max_length": 2560, + "max_ctx_length": 2528, + "max_gen_toks": 32 }, - "hatebr_offensive": { - "sample_size": 1400, - "truncated": 0, - "non_truncated": 1400, - "padded": 0, - "non_padded": 1400, - "fewshots_truncated": 0, - "mean_seq_length": 1305.3878571428572, - "min_seq_length": 1282, - "max_seq_length": 1556, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - }, - "oab_exams": { - "sample_size": 2195, - "truncated": 0, - "non_truncated": 2195, - "padded": 0, - "non_padded": 2195, - "fewshots_truncated": 0, - "mean_seq_length": 1365.764464692483, - "min_seq_length": 1099, - "max_seq_length": 1868, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 3.0, - "mean_effective_fewshot_size": 3.0 + "task_model_meta": { + "assin2_rte": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1369.7455065359477, + "min_seq_length": 1346, + "max_seq_length": 1436, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "assin2_sts": { + "sample_size": 2448, + "truncated": 0, + "non_truncated": 2448, + "padded": 0, + "non_padded": 2448, + "fewshots_truncated": 0, + "mean_seq_length": 1593.7455065359477, + "min_seq_length": 1570, + "max_seq_length": 1660, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "bluex": { + "sample_size": 719, + "truncated": 0, + "non_truncated": 719, + "padded": 0, + "non_padded": 719, + "fewshots_truncated": 0, + "mean_seq_length": 1719.9262865090404, + "min_seq_length": 1343, + "max_seq_length": 2520, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "enem_challenge": { + "sample_size": 1429, + "truncated": 1, + "non_truncated": 1428, + "padded": 0, + "non_padded": 1429, + "fewshots_truncated": 1, + "mean_seq_length": 1620.039188243527, + "min_seq_length": 1354, + "max_seq_length": 2618, + "max_ctx_length": 2528, + 
"max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 2.9993002099370187 + }, + "faquad_nli": { + "sample_size": 650, + "truncated": 0, + "non_truncated": 650, + "padded": 0, + "non_padded": 650, + "fewshots_truncated": 0, + "mean_seq_length": 1594.9876923076922, + "min_seq_length": 1539, + "max_seq_length": 1715, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 15.0, + "mean_effective_fewshot_size": 15.0 + }, + "hatebr_offensive": { + "sample_size": 1400, + "truncated": 0, + "non_truncated": 1400, + "padded": 0, + "non_padded": 1400, + "fewshots_truncated": 0, + "mean_seq_length": 1305.3878571428572, + "min_seq_length": 1282, + "max_seq_length": 1556, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "oab_exams": { + "sample_size": 2195, + "truncated": 0, + "non_truncated": 2195, + "padded": 0, + "non_padded": 2195, + "fewshots_truncated": 0, + "mean_seq_length": 1365.764464692483, + "min_seq_length": 1099, + "max_seq_length": 1868, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 3.0, + "mean_effective_fewshot_size": 3.0 + }, + "portuguese_hate_speech": { + "sample_size": 851, + "truncated": 0, + "non_truncated": 851, + "padded": 0, + "non_padded": 851, + "fewshots_truncated": 0, + "mean_seq_length": 1806.3360752056403, + "min_seq_length": 1771, + "max_seq_length": 1845, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + }, + "tweetsentbr": { + "sample_size": 2010, + "truncated": 0, + "non_truncated": 2010, + "padded": 0, + "non_padded": 2010, + "fewshots_truncated": 0, + "mean_seq_length": 1552.2492537313433, + "min_seq_length": 1531, + "max_seq_length": 1647, + "max_ctx_length": 2528, + "max_gen_toks": 32, + "mean_original_fewshots_size": 25.0, + "mean_effective_fewshot_size": 25.0 + } }, - "portuguese_hate_speech": { - "sample_size": 851, - "truncated": 0, - "non_truncated": 851, - "padded": 0, - "non_padded": 851, - "fewshots_truncated": 0, - "mean_seq_length": 1806.3360752056403, - "min_seq_length": 1771, - "max_seq_length": 1845, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 + "config": { + "model": "huggingface", + "model_args": "pretrained=zhengr/MixTAO-7Bx2-MoE-v8.1,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", + "batch_size": "auto", + "batch_sizes": [], + "device": null, + "use_cache": null, + "limit": [ + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + "bootstrap_iters": 0, + "gen_kwargs": null }, - "tweetsentbr": { - "sample_size": 2010, - "truncated": 0, - "non_truncated": 2010, - "padded": 0, - "non_padded": 2010, - "fewshots_truncated": 0, - "mean_seq_length": 1552.2492537313433, - "min_seq_length": 1531, - "max_seq_length": 1647, - "max_ctx_length": 2528, - "max_gen_toks": 32, - "mean_original_fewshots_size": 25.0, - "mean_effective_fewshot_size": 25.0 - } - }, - "config": { - "model": "huggingface", - "model_args": "pretrained=zhengr/MixTAO-7Bx2-MoE-v8.1,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560", - "batch_size": "auto", - "batch_sizes": [], - "device": null, - "use_cache": null, - "limit": [ - null, - null, - null, - null, - null, - null, - null, - null, - null - ], - "bootstrap_iters": 0, - "gen_kwargs": null - }, 
- "git_hash": "51e0e5e" + "git_hash": "51e0e5e" } \ No newline at end of file diff --git a/zhengr/MixTAO-7Bx2-MoE-v8.1/results_2024-05-17T19-51-43.191836.json b/zhengr/MixTAO-7Bx2-MoE-v8.1/results_2024-05-17T19-51-43.191836.json index 3669ca8d0cd1d305936eb30b3ec06e44e5b5e03e..0c41bea460c1cfd7abd9ef3b217213e8ba26f37d 100644 --- a/zhengr/MixTAO-7Bx2-MoE-v8.1/results_2024-05-17T19-51-43.191836.json +++ b/zhengr/MixTAO-7Bx2-MoE-v8.1/results_2024-05-17T19-51-43.191836.json @@ -34,8 +34,8 @@ "eval_version": "1.1.0" }, "results": { - "all_grouped_average": 0.6743914860040029, - "all_grouped_npm": 0.5180898414638916, + "all_grouped_average": 0.6925759968800934, + "all_grouped_npm": 0.545150125505693, "all_grouped": { "enem_challenge": 0.6403079076277117, "bluex": 0.541029207232267, @@ -45,7 +45,7 @@ "faquad_nli": 0.7769607843137254, "hatebr_offensive": 0.8200809063593439, "portuguese_hate_speech": 0.6746822513364406, - "tweetsentbr": 0.4909817936544455 + "tweetsentbr": 0.6546423915392606 }, "all": { "harness|enem_challenge|enem_challenge|None|3": 0.6403079076277117, @@ -56,7 +56,7 @@ "harness|faquad_nli|faquad_nli|None|15": 0.7769607843137254, "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8200809063593439, "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6746822513364406, - "harness|tweetsentbr|tweetsentbr|None|25": 0.4909817936544455 + "harness|tweetsentbr|tweetsentbr|None|25": 0.6546423915392606 }, "harness|enem_challenge|enem_challenge|None|3": { "acc,all": 0.6403079076277117, @@ -150,9 +150,9 @@ "main_score": 0.6746822513364406 }, "harness|tweetsentbr|tweetsentbr|None|25": { - "f1_macro,all": 0.4909817936544455, + "f1_macro,all": 0.6546423915392606, "acc,all": 0.7019900497512438, - "main_score": 0.4909817936544455 + "main_score": 0.6546423915392606 } }, "config_tasks": {